1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <skywalk/os_skywalk_private.h>
29 #include <skywalk/nexus/netif/nx_netif.h>
30 #include <sys/sdt.h>
31
32 static void
nx_netif_filter_tx_mbuf_enqueue(struct nexus_netif_adapter * nifna,struct mbuf * m_chain)33 nx_netif_filter_tx_mbuf_enqueue(struct nexus_netif_adapter *nifna,
34 struct mbuf *m_chain)
35 {
36 struct __kern_packet *fpkt_chain;
37 struct nx_netif *nif = nifna->nifna_netif;
38 struct netif_stats *nifs = &nif->nif_stats;
39
40 if (nif->nif_filter_cnt == 0) {
41 uint32_t dropcnt = 0;
42
43 nx_netif_mbuf_chain_info(m_chain, NULL, &dropcnt, NULL);
44 m_freem_list(m_chain);
45 DTRACE_SKYWALK2(mbuf__default__drop, struct nx_netif *, nif,
46 uint32_t, dropcnt);
47 STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_DEFAULT, dropcnt);
48 STATS_ADD(nifs, NETIF_STATS_DROP, dropcnt);
49 return;
50 }
51 fpkt_chain = nx_netif_mbuf_to_filter_pkt_chain(nifna, m_chain,
52 NETIF_CONVERT_TX);
53 if (fpkt_chain == NULL) {
54 return;
55 }
56 (void) nx_netif_filter_inject(nifna, NULL, fpkt_chain,
57 NETIF_FILTER_TX | NETIF_FILTER_SOURCE);
58 }
59
60 SK_NO_INLINE_ATTRIBUTE
61 static struct mbuf *
get_next_mbuf(struct nx_mbq * mbqs,int * curr,int end)62 get_next_mbuf(struct nx_mbq *mbqs, int *curr, int end)
63 {
64 int i;
65 struct mbuf *m = NULL;
66
67 for (i = *curr; i >= end; i--) {
68 if ((m = nx_mbq_safe_deq(&mbqs[i])) != NULL) {
69 break;
70 }
71 }
72 *curr = i;
73 return m;
74 }
75
76 SK_NO_INLINE_ATTRIBUTE
77 static struct mbuf *
nx_netif_filter_tx_processed_mbuf_dequeue(struct nexus_netif_adapter * nifna,mbuf_svc_class_t sc,uint32_t pkt_limit,uint32_t byte_limit)78 nx_netif_filter_tx_processed_mbuf_dequeue(struct nexus_netif_adapter *nifna,
79 mbuf_svc_class_t sc, uint32_t pkt_limit, uint32_t byte_limit)
80 {
81 struct nx_netif *nif = nifna->nifna_netif;
82 int curr, end;
83 uint32_t cnt = 0, bytes = 0;
84 struct mbuf *m, *m_head = NULL, **m_tailp = &m_head;
85
86 if (sc == MBUF_SC_UNSPEC) {
87 /*
88 * If the sc is unspecified, walk the queues from the highest
89 * class to lowest.
90 */
91 curr = MBUF_TC_MAX - 1;
92 end = 0;
93 } else {
94 /*
95 * Only dequeue from the specified queue.
96 */
97 if (!MBUF_VALID_SC(sc)) {
98 sc = MBUF_SC_BE;
99 }
100 curr = MBUF_SC2TC(sc);
101 end = curr;
102 }
103 while (cnt < pkt_limit && bytes < byte_limit) {
104 m = get_next_mbuf(nif->nif_tx_processed_mbq, &curr, end);
105 if (m == NULL) {
106 break;
107 }
108 cnt++;
109 bytes += m_pktlen(m);
110 *m_tailp = m;
111 m_tailp = &m->m_nextpkt;
112 }
113 DTRACE_SKYWALK4(processed__mbuf__dequeue, struct nexus_netif_adapter *,
114 nifna, uint32_t, cnt, uint32_t, bytes, struct mbuf *, m_head);
115 return m_head;
116 }
117
/*
 * Enqueue a chain of filter-processed TX mbufs onto the per-traffic-class
 * processed queue, from which the driver later dequeues them. Drops the
 * entire chain and returns ENOBUFS if the target queue is over its limit;
 * returns 0 on success.
 */
errno_t
nx_netif_filter_tx_processed_mbuf_enqueue(struct nexus_netif_adapter *nifna,
    mbuf_svc_class_t sc, struct mbuf *m_chain)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct mbuf *m_tail = NULL;
	uint32_t cnt = 0, bytes = 0, qlen = 0, tc;
	struct nx_mbq *q;

	/*
	 * It's not possible for sc to be unspecified here. Putting this check
	 * just to be safe.
	 */
	if (!MBUF_VALID_SC(sc)) {
		sc = MBUF_SC_BE;
	}
	tc = MBUF_SC2TC(sc);
	VERIFY(tc < MBUF_TC_MAX);
	q = &nif->nif_tx_processed_mbq[tc];
	/* Walk the chain once to find its tail, packet count and byte count. */
	nx_netif_mbuf_chain_info(m_chain, &m_tail, &cnt, &bytes);
	nx_mbq_lock_spin(q);
	if (__improbable((qlen = nx_mbq_len(q)) > nx_mbq_limit(q))) {
		/* Queue over limit: drop the whole chain and record stats. */
		nx_mbq_unlock(q);
		DTRACE_SKYWALK4(q__full, struct nexus_netif_adapter *, nifna,
		    struct nx_mbq *, q, uint32_t, qlen, struct mbuf *, m_chain);
		m_freem_list(m_chain);
		STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_MBQ_FULL, cnt);
		STATS_ADD(nifs, NETIF_STATS_DROP, cnt);
		return ENOBUFS;
	}
	/* Single-shot multi-enqueue using the precomputed tail and counts. */
	nx_mbq_enq_multi(q, m_chain, m_tail, cnt, bytes);
	qlen = nx_mbq_len(q);

	DTRACE_SKYWALK4(processed__mbuf__enqueue, struct nexus_netif_adapter *,
	    nifna, struct nx_mbq *, q, uint32_t, qlen, uint32_t, cnt);
	nx_mbq_unlock(q);
	return 0;
}
157
158 static errno_t
nx_netif_tx_processed_mbuf_get_len(struct nexus_netif_adapter * nifna,mbuf_svc_class_t sc,uint32_t * packets,uint32_t * bytes,errno_t orig_err)159 nx_netif_tx_processed_mbuf_get_len(struct nexus_netif_adapter *nifna,
160 mbuf_svc_class_t sc, uint32_t *packets, uint32_t *bytes, errno_t orig_err)
161 {
162 struct nx_netif *nif = nifna->nifna_netif;
163 struct nx_mbq *q;
164 uint32_t qlen = 0;
165 size_t qsize = 0;
166 errno_t err = 0;
167 int i;
168
169 if (sc == MBUF_SC_UNSPEC) {
170 for (i = MBUF_TC_MAX - 1; i >= 0; i--) {
171 q = &nif->nif_tx_processed_mbq[i];
172 nx_mbq_lock_spin(q);
173 qlen += nx_mbq_len(q);
174 qsize += nx_mbq_size(q);
175 nx_mbq_unlock(q);
176 }
177 } else {
178 if (!MBUF_VALID_SC(sc)) {
179 sc = MBUF_SC_BE;
180 }
181 i = MBUF_SC2TC(sc);
182 VERIFY(i >= 0 && i < MBUF_TC_MAX);
183 q = &nif->nif_tx_processed_mbq[i];
184 nx_mbq_lock_spin(q);
185 qlen += nx_mbq_len(q);
186 qsize += nx_mbq_size(q);
187 nx_mbq_unlock(q);
188 }
189 if (packets != NULL) {
190 *packets += qlen;
191 }
192 if (bytes != NULL) {
193 *bytes += (uint32_t)qsize;
194 }
195 /* Original error takes precedence if we have no processed packets */
196 if (qlen == 0) {
197 err = orig_err;
198 }
199
200 DTRACE_SKYWALK6(processed__mbuf__qlen, struct nexus_netif_adapter *,
201 nifna, struct nx_mbq *, q, uint32_t, qlen, size_t, qsize,
202 uint32_t, (packets != NULL) ? *packets : 0,
203 uint32_t, (bytes != NULL) ? *bytes : 0);
204 return err;
205 }
206
207 static void
fix_dequeue_mbuf_return_args(struct mbuf * m_chain,classq_pkt_t * head,classq_pkt_t * tail,uint32_t * cnt,uint32_t * len,errno_t orig_err,errno_t * err)208 fix_dequeue_mbuf_return_args(struct mbuf *m_chain, classq_pkt_t *head,
209 classq_pkt_t *tail, uint32_t *cnt, uint32_t *len, errno_t orig_err,
210 errno_t *err)
211 {
212 struct mbuf *m_tail = NULL;
213 uint32_t c = 0, l = 0;
214
215 nx_netif_mbuf_chain_info(m_chain, &m_tail, &c, &l);
216 if (head != NULL) {
217 CLASSQ_PKT_INIT_MBUF(head, m_chain);
218 }
219 if (tail != NULL) {
220 CLASSQ_PKT_INIT_MBUF(tail, m_tail);
221 }
222 if (cnt != NULL) {
223 *cnt = c;
224 }
225 if (len != NULL) {
226 *len = l;
227 }
228
229 *err = (m_chain == NULL) ? EAGAIN : 0;
230
231 /*
232 * If we can't dequeue from either the AQM queue or the processed queue,
233 * the original (AQM queue) error takes precedence. If we can dequeue
234 * something, we ignore the original error. Most likely both errors
235 * can only be EAGAIN.
236 */
237 if (*err != 0 && orig_err != 0) {
238 *err = orig_err;
239 }
240 }
241
/*
 * This is called after the driver has dequeued packets off from AQM.
 * This callback is used for redirecting new packets to filters and
 * processed packets back to the driver. Returns 0 when a processed
 * chain is handed back; otherwise an error (see
 * fix_dequeue_mbuf_return_args()).
 */
errno_t
nx_netif_compat_tx_dequeue(struct nexus_netif_adapter *nifna,
    uint32_t sc, uint32_t pkt_limit, uint32_t byte_limit,
    classq_pkt_t *head, classq_pkt_t *tail, uint32_t *cnt, uint32_t *len,
    boolean_t drvmgt, errno_t orig_err)
{
#pragma unused(drvmgt)
	struct nx_netif *nif = nifna->nifna_netif;
	errno_t err;
	struct mbuf *m_chain;

	/* Fast path: no filters and no default drop; pass through unchanged. */
	if (__probable(nif->nif_filter_cnt == 0 &&
	    !NETIF_DEFAULT_DROP_ENABLED(nif))) {
		return orig_err;
	}

	if (head->cp_mbuf != NULL) {
		ASSERT(head->cp_ptype == QP_MBUF);
		/*
		 * Moving new packets to filters.
		 * TODO:
		 * The number of packets to move should be dependent on
		 * the available ring space of the next filter. The limits
		 * should be adjusted at ifclassq_dequeue_common().
		 */
		nx_netif_filter_tx_mbuf_enqueue(nifna, head->cp_mbuf);
	}

	/*
	 * Move processed packets to the driver.
	 */
	m_chain = nx_netif_filter_tx_processed_mbuf_dequeue(nifna, sc,
	    pkt_limit, byte_limit);

	/* Rewrite the caller's head/tail/cnt/len/err to describe m_chain. */
	fix_dequeue_mbuf_return_args(m_chain, head, tail, cnt, len,
	    orig_err, &err);
	return err;
}
285
286 /*
287 * This is called by the driver to get the ifnet queue length.
288 * Since the processed queue is separate from the ifnet send queue, this count
289 * needs to be retrieved separately and added to the ifnet send queue count.
290 */
291 errno_t
nx_netif_compat_tx_get_len(struct nexus_netif_adapter * nifna,uint32_t sc,uint32_t * packets,uint32_t * bytes,errno_t orig_err)292 nx_netif_compat_tx_get_len(struct nexus_netif_adapter *nifna, uint32_t sc,
293 uint32_t *packets, uint32_t *bytes, errno_t orig_err)
294 {
295 struct nx_netif *nif = nifna->nifna_netif;
296
297 if (__probable(nif->nif_filter_cnt == 0)) {
298 return orig_err;
299 }
300 return nx_netif_tx_processed_mbuf_get_len(nifna, sc, packets,
301 bytes, orig_err);
302 }
303