/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <sys/sdt.h>

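/*
 * Redirect an mbuf chain dequeued from AQM into the filter path. If no
 * filters are attached, the chain is dropped and accounted for in the
 * netif stats; otherwise it is converted to a filter packet chain and
 * injected (NETIF_FILTER_TX | NETIF_FILTER_SOURCE).
 */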
static void
nx_netif_filter_tx_mbuf_enqueue(struct nexus_netif_adapter *nifna,
    struct mbuf *m_chain)
{
	struct __kern_packet *fpkt_chain;
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;

	if (nif->nif_filter_cnt == 0) {
		uint32_t dropcnt = 0;

		nx_netif_mbuf_chain_info(m_chain, NULL, &dropcnt, NULL);
		m_freem_list(m_chain);
		DTRACE_SKYWALK2(mbuf__default__drop, struct nx_netif *, nif,
		    uint32_t, dropcnt);
		STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_DEFAULT, dropcnt);
		STATS_ADD(nifs, NETIF_STATS_DROP, dropcnt);
		return;
	}
	fpkt_chain = nx_netif_mbuf_to_filter_pkt_chain(nifna, m_chain,
	    NETIF_CONVERT_TX);
	if (fpkt_chain == NULL) {
		return;
	}
	(void) nx_netif_filter_inject(nifna, NULL, fpkt_chain,
	    NETIF_FILTER_TX | NETIF_FILTER_SOURCE);
}

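/*
 * Dequeue the next mbuf from the per-traffic-class queues, scanning from
 * the class at *curr down to end. *curr is updated so that a subsequent
 * call resumes the scan where this one left off.
 */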
SK_NO_INLINE_ATTRIBUTE
static struct mbuf *
get_next_mbuf(struct nx_mbq mbqs[MBUF_TC_MAX], int *curr, int end)
{
	int i;
	struct mbuf *m = NULL;

	for (i = *curr; i >= end; i--) {
		if ((m = nx_mbq_safe_deq(&mbqs[i])) != NULL) {
			break;
		}
	}
	*curr = i;
	return m;
}

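/*
 * Dequeue up to pkt_limit packets or byte_limit bytes of filter-processed
 * mbufs. If sc is unspecified, the traffic classes are drained from
 * highest to lowest; otherwise only the queue for sc is used.
 */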
SK_NO_INLINE_ATTRIBUTE
static struct mbuf *
nx_netif_filter_tx_processed_mbuf_dequeue(struct nexus_netif_adapter *nifna,
    mbuf_svc_class_t sc, uint32_t pkt_limit, uint32_t byte_limit)
{
	struct nx_netif *nif = nifna->nifna_netif;
	int curr, end;
	uint32_t cnt = 0, bytes = 0;
	struct mbuf *m, *__single m_head = NULL;
	struct mbuf **m_tailp = &m_head;

	if (sc == MBUF_SC_UNSPEC) {
		/*
		 * If the sc is unspecified, walk the queues from the highest
		 * class to lowest.
		 */
		curr = MBUF_TC_MAX - 1;
		end = 0;
	} else {
		/*
		 * Only dequeue from the specified queue.
		 */
		if (!MBUF_VALID_SC(sc)) {
			sc = MBUF_SC_BE;
		}
		curr = MBUF_SC2TC(sc);
		end = curr;
	}
	while (cnt < pkt_limit && bytes < byte_limit) {
		m = get_next_mbuf(nif->nif_tx_processed_mbq, &curr, end);
		if (m == NULL) {
			break;
		}
		cnt++;
		bytes += m_pktlen(m);
		*m_tailp = m;
		m_tailp = &m->m_nextpkt;
	}
	DTRACE_SKYWALK4(processed__mbuf__dequeue, struct nexus_netif_adapter *,
	    nifna, uint32_t, cnt, uint32_t, bytes, struct mbuf *, m_head);
	return m_head;
}

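/*
 * Enqueue a chain of filter-processed mbufs onto the per-traffic-class
 * processed queue, from which the driver's next dequeue will pick them up.
 * The chain is dropped if the queue is already over its limit.
 */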
errno_t
nx_netif_filter_tx_processed_mbuf_enqueue(struct nexus_netif_adapter *nifna,
    mbuf_svc_class_t sc, struct mbuf *m_chain)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct mbuf *__single m_tail = NULL;
	uint32_t cnt = 0, bytes = 0, qlen = 0, tc;
	struct nx_mbq *q;

	/*
	 * It's not possible for sc to be unspecified here; this check is
	 * just a safeguard.
	 */
	if (!MBUF_VALID_SC(sc)) {
		sc = MBUF_SC_BE;
	}
	tc = MBUF_SC2TC(sc);
	VERIFY(tc < MBUF_TC_MAX);
	q = &nif->nif_tx_processed_mbq[tc];
	nx_netif_mbuf_chain_info(m_chain, &m_tail, &cnt, &bytes);
	nx_mbq_lock_spin(q);
	if (__improbable((qlen = nx_mbq_len(q)) > nx_mbq_limit(q))) {
		nx_mbq_unlock(q);
		DTRACE_SKYWALK4(q__full, struct nexus_netif_adapter *, nifna,
		    struct nx_mbq *, q, uint32_t, qlen, struct mbuf *, m_chain);
		m_freem_list(m_chain);
		STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_MBQ_FULL, cnt);
		STATS_ADD(nifs, NETIF_STATS_DROP, cnt);
		return ENOBUFS;
	}
	nx_mbq_enq_multi(q, m_chain, m_tail, cnt, bytes);
	qlen = nx_mbq_len(q);

	DTRACE_SKYWALK4(processed__mbuf__enqueue, struct nexus_netif_adapter *,
	    nifna, struct nx_mbq *, q, uint32_t, qlen, uint32_t, cnt);
	nx_mbq_unlock(q);
	return 0;
}

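/*
 * Add the packet and byte counts of the processed mbuf queue(s) for the
 * given service class (or of all classes if unspecified) to the caller's
 * counters. If the processed queues are empty, the caller's original
 * error takes precedence and is returned.
 */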
static errno_t
nx_netif_tx_processed_mbuf_get_len(struct nexus_netif_adapter *nifna,
    mbuf_svc_class_t sc, uint32_t *packets, uint32_t *bytes, errno_t orig_err)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct nx_mbq *q;
	uint32_t qlen = 0;
	size_t qsize = 0;
	errno_t err = 0;
	int i;

	if (sc == MBUF_SC_UNSPEC) {
		for (i = MBUF_TC_MAX - 1; i >= 0; i--) {
			q = &nif->nif_tx_processed_mbq[i];
			nx_mbq_lock_spin(q);
			qlen += nx_mbq_len(q);
			qsize += nx_mbq_size(q);
			nx_mbq_unlock(q);
		}
	} else {
		if (!MBUF_VALID_SC(sc)) {
			sc = MBUF_SC_BE;
		}
		i = MBUF_SC2TC(sc);
		VERIFY(i >= 0 && i < MBUF_TC_MAX);
		q = &nif->nif_tx_processed_mbq[i];
		nx_mbq_lock_spin(q);
		qlen += nx_mbq_len(q);
		qsize += nx_mbq_size(q);
		nx_mbq_unlock(q);
	}
	if (packets != NULL) {
		*packets += qlen;
	}
	if (bytes != NULL) {
		*bytes += (uint32_t)qsize;
	}
	/* Original error takes precedence if we have no processed packets */
	if (qlen == 0) {
		err = orig_err;
	}

	DTRACE_SKYWALK6(processed__mbuf__qlen, struct nexus_netif_adapter *,
	    nifna, struct nx_mbq *, q, uint32_t, qlen, size_t, qsize,
	    uint32_t, (packets != NULL) ? *packets : 0,
	    uint32_t, (bytes != NULL) ? *bytes : 0);
	return err;
}

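/*
 * Fill in the dequeue callback's return arguments (head, tail, packet and
 * byte counts, error) from the chain of processed mbufs being handed back
 * to the driver.
 */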
static void
fix_dequeue_mbuf_return_args(struct mbuf *m_chain, classq_pkt_t *head,
    classq_pkt_t *tail, uint32_t *cnt, uint32_t *len, errno_t orig_err,
    errno_t *err)
{
	struct mbuf *__single m_tail = NULL;
	uint32_t c = 0, l = 0;

	nx_netif_mbuf_chain_info(m_chain, &m_tail, &c, &l);
	if (head != NULL) {
		CLASSQ_PKT_INIT_MBUF(head, m_chain);
	}
	if (tail != NULL) {
		CLASSQ_PKT_INIT_MBUF(tail, m_tail);
	}
	if (cnt != NULL) {
		*cnt = c;
	}
	if (len != NULL) {
		*len = l;
	}

	*err = (m_chain == NULL) ? EAGAIN : 0;

	/*
	 * If we can't dequeue from either the AQM queue or the processed queue,
	 * the original (AQM queue) error takes precedence. If we can dequeue
	 * something, we ignore the original error. Most likely both errors
	 * can only be EAGAIN.
	 */
	if (*err != 0 && orig_err != 0) {
		*err = orig_err;
	}
}

/*
 * This is called after the driver has dequeued packets from AQM.
 * This callback is used for redirecting new packets to filters and
 * processed packets back to the driver.
 */
errno_t
nx_netif_compat_tx_dequeue(struct nexus_netif_adapter *nifna,
    uint32_t sc, uint32_t pkt_limit, uint32_t byte_limit,
    classq_pkt_t *head, classq_pkt_t *tail, uint32_t *cnt, uint32_t *len,
    boolean_t drvmgt, errno_t orig_err)
{
#pragma unused(drvmgt)
	struct nx_netif *nif = nifna->nifna_netif;
	errno_t err;
	struct mbuf *m_chain;

	if (__probable(nif->nif_filter_cnt == 0 &&
	    !NETIF_DEFAULT_DROP_ENABLED(nif))) {
		return orig_err;
	}

	if (head->cp_mbuf != NULL) {
		ASSERT(head->cp_ptype == QP_MBUF);
		/*
		 * Move new packets to the filters.
		 * TODO:
		 * The number of packets to move should be dependent on
		 * the available ring space of the next filter. The limits
		 * should be adjusted at ifclassq_dequeue().
		 */
		nx_netif_filter_tx_mbuf_enqueue(nifna, head->cp_mbuf);
	}

	/*
	 * Move processed packets to the driver.
	 */
	m_chain = nx_netif_filter_tx_processed_mbuf_dequeue(nifna, sc,
	    pkt_limit, byte_limit);

	fix_dequeue_mbuf_return_args(m_chain, head, tail, cnt, len,
	    orig_err, &err);
	return err;
}

/*
 * This is called by the driver to get the ifnet queue length.
 * Since the processed queue is separate from the ifnet send queue, this count
 * needs to be retrieved separately and added to the ifnet send queue count.
 */
errno_t
nx_netif_compat_tx_get_len(struct nexus_netif_adapter *nifna, uint32_t sc,
    uint32_t *packets, uint32_t *bytes, errno_t orig_err)
{
	struct nx_netif *nif = nifna->nifna_netif;

	if (__probable(nif->nif_filter_cnt == 0)) {
		return orig_err;
	}
	return nx_netif_tx_processed_mbuf_get_len(nifna, sc, packets,
	           bytes, orig_err);
}