/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <sys/sdt.h>

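/*
 * Redirect a chain of TX packets into the filter path. If no filters are
 * attached, the chain is freed and accounted for as a default drop;
 * otherwise the packets are converted to filter packets and injected into
 * the filter chain as a TX source.
 */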
static void
nx_netif_filter_tx_pkt_enqueue(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt_chain)
{
	struct __kern_packet *fpkt_chain;
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;

	if (nif->nif_filter_cnt == 0) {
		int dropcnt = 0;

		nx_netif_free_packet_chain(pkt_chain, &dropcnt);
		DTRACE_SKYWALK2(pkt__default__drop, struct nx_netif *, nif,
		    int, dropcnt);
		STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_DEFAULT, dropcnt);
		STATS_ADD(nifs, NETIF_STATS_DROP, dropcnt);
		return;
	}
	fpkt_chain = nx_netif_pkt_to_filter_pkt_chain(nifna, pkt_chain,
	    NETIF_CONVERT_TX);
	if (fpkt_chain == NULL) {
		return;
	}
	(void) nx_netif_filter_inject(nifna, NULL, fpkt_chain,
	    NETIF_FILTER_TX | NETIF_FILTER_SOURCE);
}

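/*
 * Dequeue the next available packet from the per-traffic-class queue array,
 * scanning from the queue at index *curr down to index end. *curr is
 * updated to the index of the queue the packet was dequeued from.
 */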
SK_NO_INLINE_ATTRIBUTE
static struct __kern_packet *
get_next_pkt(struct nx_pktq *pktqs, int *curr, int end)
{
	int i;
	struct __kern_packet *p = NULL;

	for (i = *curr; i >= end; i--) {
		if ((p = nx_pktq_safe_deq(&pktqs[i])) != NULL) {
			break;
		}
	}
	*curr = i;
	return p;
}

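/*
 * Dequeue up to pkt_limit packets (or byte_limit bytes) of filter-processed
 * TX packets. If sc is unspecified, the per-class queues are drained from
 * the highest traffic class down to the lowest; otherwise only the queue
 * for the given service class is drained. Returns the dequeued chain.
 */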
SK_NO_INLINE_ATTRIBUTE
static struct __kern_packet *
nx_netif_filter_tx_processed_pkt_dequeue(struct nexus_netif_adapter *nifna,
    kern_packet_svc_class_t sc, uint32_t pkt_limit, uint32_t byte_limit)
{
	struct nx_netif *nif = nifna->nifna_netif;
	int curr, end;
	uint32_t cnt = 0, bytes = 0;
	struct __kern_packet *p, *p_head = NULL, **p_tailp = &p_head;

	if (sc == KPKT_SC_UNSPEC) {
		/*
		 * If the sc is unspecified, walk the queues from the highest
		 * class to the lowest.
		 */
		curr = KPKT_TC_MAX - 1;
		end = 0;
	} else {
		/*
		 * Only dequeue from the specified queue.
		 */
		if (!KPKT_VALID_SVC(sc)) {
			sc = KPKT_SC_BE;
		}
		curr = PKT_SC2TC(sc);
		end = curr;
	}
	while (cnt < pkt_limit && bytes < byte_limit) {
		p = get_next_pkt(nif->nif_tx_processed_pktq, &curr, end);
		if (p == NULL) {
			break;
		}
		cnt++;
		bytes += p->pkt_length;
		*p_tailp = p;
		p_tailp = &p->pkt_nextpkt;
	}
	DTRACE_SKYWALK4(processed__pkt__dequeue, struct nexus_netif_adapter *,
	    nifna, uint32_t, cnt, uint32_t, bytes, struct __kern_packet *,
	    p_head);
	return p_head;
}

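/*
 * Enqueue a chain of filter-processed TX packets onto the per-traffic-class
 * processed queue so that a later dequeue can hand them back to the driver.
 * The chain is dropped if the queue is already over its limit.
 */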
errno_t
nx_netif_filter_tx_processed_pkt_enqueue(struct nexus_netif_adapter *nifna,
    kern_packet_svc_class_t sc, struct __kern_packet *p_chain)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct __kern_packet *p_tail = NULL;
	uint32_t cnt = 0, bytes = 0, qlen = 0, tc;
	struct nx_pktq *q;

	/*
	 * sc should never be unspecified here; this check is purely
	 * defensive.
	 */
	if (!KPKT_VALID_SVC(sc)) {
		sc = KPKT_SC_BE;
	}
	tc = PKT_SC2TC(sc);
	VERIFY(tc < KPKT_TC_MAX);
	q = &nif->nif_tx_processed_pktq[tc];
	nx_netif_pkt_chain_info(p_chain, &p_tail, &cnt, &bytes);
	nx_pktq_lock_spin(q);
	if (__improbable((qlen = nx_pktq_len(q)) > nx_pktq_limit(q))) {
		nx_pktq_unlock(q);
		DTRACE_SKYWALK4(q__full, struct nexus_netif_adapter *, nifna,
		    struct nx_pktq *, q, uint32_t, qlen,
		    struct __kern_packet *, p_chain);
		nx_netif_free_packet_chain(p_chain, NULL);
		STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_PKTQ_FULL, cnt);
		STATS_ADD(nifs, NETIF_STATS_DROP, cnt);
		return ENOBUFS;
	}
	nx_pktq_enq_multi(q, p_chain, p_tail, cnt, bytes);
	qlen = nx_pktq_len(q);

	DTRACE_SKYWALK4(processed__pkt__enqueue, struct nexus_netif_adapter *,
	    nifna, struct nx_pktq *, q, uint32_t, qlen, uint32_t, cnt);
	nx_pktq_unlock(q);
	return 0;
}

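/*
 * Report the number of packets and bytes sitting in the processed TX
 * queue(s) for the given service class (or all classes if unspecified).
 * The counts are added to *packets and *bytes. If the processed queues are
 * empty, the caller's original error is returned unchanged.
 */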
static errno_t
nx_netif_tx_processed_pkt_get_len(struct nexus_netif_adapter *nifna,
    kern_packet_svc_class_t sc, uint32_t *packets, uint32_t *bytes,
    errno_t orig_err)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct nx_pktq *q;
	uint32_t qlen = 0;
	size_t qsize = 0;
	errno_t err = 0;
	int i;

	if (sc == KPKT_SC_UNSPEC) {
		for (i = KPKT_TC_MAX - 1; i >= 0; i--) {
			q = &nif->nif_tx_processed_pktq[i];
			nx_pktq_lock_spin(q);
			qlen += nx_pktq_len(q);
			qsize += nx_pktq_size(q);
			nx_pktq_unlock(q);
		}
	} else {
		if (!KPKT_VALID_SVC(sc)) {
			sc = KPKT_SC_BE;
		}
		i = PKT_SC2TC(sc);
		VERIFY(i >= 0 && i < KPKT_TC_MAX);
		q = &nif->nif_tx_processed_pktq[i];
		nx_pktq_lock_spin(q);
		qlen += nx_pktq_len(q);
		qsize += nx_pktq_size(q);
		nx_pktq_unlock(q);
	}
	if (packets != NULL) {
		*packets += qlen;
	}
	if (bytes != NULL) {
		*bytes += (uint32_t)qsize;
	}
	/* Original error takes precedence if we have no processed packets */
	if (qlen == 0) {
		err = orig_err;
	}

	DTRACE_SKYWALK6(processed__pkt__qlen, struct nexus_netif_adapter *,
	    nifna, struct nx_pktq *, q, uint32_t, qlen, size_t, qsize,
	    uint32_t, (packets != NULL) ? *packets : 0,
	    uint32_t, (bytes != NULL) ? *bytes : 0);
	return err;
}

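/*
 * Fill in the head/tail/count/length output arguments for the dequeued
 * chain and compute the error to be returned to the caller of the dequeue
 * callback.
 */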
static void
fix_dequeue_pkt_return_args(struct __kern_packet *p_chain, classq_pkt_t *head,
    classq_pkt_t *tail, uint32_t *cnt, uint32_t *len, errno_t orig_err,
    errno_t *err)
{
	struct __kern_packet *p_tail = NULL;
	uint32_t c = 0, l = 0;

	nx_netif_pkt_chain_info(p_chain, &p_tail, &c, &l);
	if (head != NULL) {
		CLASSQ_PKT_INIT_PACKET(head, p_chain);
	}
	if (tail != NULL) {
		CLASSQ_PKT_INIT_PACKET(tail, p_tail);
	}
	if (cnt != NULL) {
		*cnt = c;
	}
	if (len != NULL) {
		*len = l;
	}

	*err = (p_chain == NULL) ? EAGAIN : 0;

	/*
	 * If we can't dequeue from either the AQM queue or the processed
	 * queue, the original (AQM queue) error takes precedence. If we can
	 * dequeue something, the original error is ignored. In practice both
	 * errors are most likely EAGAIN.
	 */
	if (*err != 0 && orig_err != 0) {
		*err = orig_err;
	}
}

/*
 * This is called after the driver has dequeued packets from AQM.
 * This callback is used for redirecting new packets to filters and
 * processed packets back to the driver.
 */
errno_t
nx_netif_native_tx_dequeue(struct nexus_netif_adapter *nifna,
    uint32_t sc, uint32_t pkt_limit, uint32_t byte_limit,
    classq_pkt_t *head, classq_pkt_t *tail, uint32_t *cnt, uint32_t *len,
    boolean_t drvmgt, errno_t orig_err)
{
#pragma unused(drvmgt)
	struct nx_netif *nif = nifna->nifna_netif;
	errno_t err;
	struct __kern_packet *p_chain;

	if (__probable(nif->nif_filter_cnt == 0 &&
	    !NETIF_DEFAULT_DROP_ENABLED(nif))) {
		return orig_err;
	}
	if (head->cp_kpkt != NULL) {
		ASSERT(head->cp_ptype == QP_PACKET);
		/*
		 * Move new packets to the filters.
		 * TODO:
		 * The number of packets to move should be dependent on
		 * the available ring space of the next filter. The limits
		 * should be adjusted at ifclassq_dequeue_common().
		 */
		nx_netif_filter_tx_pkt_enqueue(nifna, head->cp_kpkt);
	}

	/*
	 * Move processed packets to the driver.
	 */
	p_chain = nx_netif_filter_tx_processed_pkt_dequeue(nifna, sc,
	    pkt_limit, byte_limit);

	fix_dequeue_pkt_return_args(p_chain, head, tail, cnt, len,
	    orig_err, &err);
	return err;
}

/*
 * This is called by the driver to get the ifnet queue length.
 * Since the processed queue is separate from the ifnet send queue, this count
 * needs to be retrieved separately and added to the ifnet send queue count.
 */
errno_t
nx_netif_native_tx_get_len(struct nexus_netif_adapter *nifna,
    uint32_t sc, uint32_t *packets, uint32_t *bytes,
    errno_t orig_err)
{
	struct nx_netif *nif = nifna->nifna_netif;

	if (__probable(nif->nif_filter_cnt == 0)) {
		return orig_err;
	}
	return nx_netif_tx_processed_pkt_get_len(nifna, sc, packets,
	    bytes, orig_err);
}