xref: /xnu-12377.81.4/bsd/skywalk/nexus/netif/nx_netif_filter_native.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
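
/*
 * TX filter support for native netif adapters: packets dequeued from
 * AQM are diverted into the attached filter chain, and filter-processed
 * packets are staged on per-traffic-class queues until the driver picks
 * them up via nx_netif_native_tx_dequeue().
 */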
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <sys/sdt.h>

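/*
 * Hands a chain of newly dequeued TX packets to the attached filters.
 * If no filters are attached (the default-drop case), the chain is
 * freed and counted as dropped; otherwise the packets are converted to
 * filter packets and injected into the TX filter chain.
 */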
static void
nx_netif_filter_tx_pkt_enqueue(struct nexus_netif_adapter *nifna,
    struct __kern_packet *pkt_chain)
{
	struct __kern_packet *fpkt_chain;
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;

	if (nif->nif_filter_cnt == 0) {
		int dropcnt = 0;

		nx_netif_free_packet_chain(pkt_chain, &dropcnt);
		DTRACE_SKYWALK2(pkt__default__drop, struct nx_netif *, nif,
		    int, dropcnt);
		STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_DEFAULT, dropcnt);
		STATS_ADD(nifs, NETIF_STATS_DROP, dropcnt);
		return;
	}
	fpkt_chain = nx_netif_pkt_to_filter_pkt_chain(nifna, pkt_chain,
	    NETIF_CONVERT_TX);
	if (fpkt_chain == NULL) {
		return;
	}
	(void) nx_netif_filter_inject(nifna, NULL, fpkt_chain,
	    NETIF_FILTER_TX | NETIF_FILTER_SOURCE);
}

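/*
 * Dequeues the next available packet, scanning the queues from class
 * *curr down to class end. *curr is updated to the class the scan
 * stopped at so that the caller's next call resumes from there.
 */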
SK_NO_INLINE_ATTRIBUTE
static struct __kern_packet *
get_next_pkt(struct nx_pktq pktqs[KPKT_TC_MAX], int *curr, int end)
{
	int i;
	struct __kern_packet *p = NULL;

	for (i = *curr; i >= end; i--) {
		if ((p = nx_pktq_safe_deq(&pktqs[i])) != NULL) {
			break;
		}
	}
	*curr = i;
	return p;
}

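/*
 * Dequeues filter-processed TX packets as a single chain, stopping once
 * pkt_limit packets or byte_limit bytes have been collected. For
 * KPKT_SC_UNSPEC all service classes are drained from highest to
 * lowest; otherwise only the queue of the given class is drained.
 */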
SK_NO_INLINE_ATTRIBUTE
static struct __kern_packet *
nx_netif_filter_tx_processed_pkt_dequeue(struct nexus_netif_adapter *nifna,
    kern_packet_svc_class_t sc, uint32_t pkt_limit, uint32_t byte_limit)
{
	struct nx_netif *nif = nifna->nifna_netif;
	int curr, end;
	uint32_t cnt = 0, bytes = 0;
	struct __kern_packet *p, *__single p_head = NULL;
	struct __kern_packet **p_tailp = &p_head;

	if (sc == KPKT_SC_UNSPEC) {
		/*
		 * If the sc is unspecified, walk the queues from the highest
		 * class to the lowest.
		 */
		curr = KPKT_TC_MAX - 1;
		end = 0;
	} else {
		/*
		 * Only dequeue from the specified queue.
		 */
		if (!KPKT_VALID_SVC(sc)) {
			sc = KPKT_SC_BE;
		}
		curr = PKT_SC2TC(sc);
		end = curr;
	}
	while (cnt < pkt_limit && bytes < byte_limit) {
		p = get_next_pkt(nif->nif_tx_processed_pktq, &curr, end);
		if (p == NULL) {
			break;
		}
		cnt++;
		bytes += p->pkt_length;
		*p_tailp = p;
		p_tailp = &p->pkt_nextpkt;
	}
	DTRACE_SKYWALK4(processed__pkt__dequeue, struct nexus_netif_adapter *,
	    nifna, uint32_t, cnt, uint32_t, bytes, struct __kern_packet *,
	    p_head);
	return p_head;
}

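/*
 * Enqueues a chain of filter-processed TX packets on the processed
 * queue of the given service class. The chain is dropped and ENOBUFS
 * returned if the queue is already over its limit.
 */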
errno_t
nx_netif_filter_tx_processed_pkt_enqueue(struct nexus_netif_adapter *nifna,
    kern_packet_svc_class_t sc, struct __kern_packet *p_chain)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct netif_stats *nifs = &nif->nif_stats;
	struct __kern_packet *__single p_tail = NULL;
	uint32_t cnt = 0, bytes = 0, qlen = 0, tc;
	struct nx_pktq *q;

	/*
	 * It's not possible for sc to be unspecified here. This check is
	 * here just to be safe.
	 */
	if (!KPKT_VALID_SVC(sc)) {
		sc = KPKT_SC_BE;
	}
	tc = PKT_SC2TC(sc);
	VERIFY(tc < KPKT_TC_MAX);
	q = &nif->nif_tx_processed_pktq[tc];
	nx_netif_pkt_chain_info(p_chain, &p_tail, &cnt, &bytes);
	nx_pktq_lock_spin(q);
	if (__improbable((qlen = nx_pktq_len(q)) > nx_pktq_limit(q))) {
		nx_pktq_unlock(q);
		DTRACE_SKYWALK4(q__full, struct nexus_netif_adapter *, nifna,
		    struct nx_pktq *, q, uint32_t, qlen,
		    struct __kern_packet *, p_chain);
		nx_netif_free_packet_chain(p_chain, NULL);
		STATS_ADD(nifs, NETIF_STATS_FILTER_DROP_PKTQ_FULL, cnt);
		STATS_ADD(nifs, NETIF_STATS_DROP, cnt);
		return ENOBUFS;
	}
	nx_pktq_enq_multi(q, p_chain, p_tail, cnt, bytes);
	qlen = nx_pktq_len(q);

	DTRACE_SKYWALK4(processed__pkt__enqueue, struct nexus_netif_adapter *,
	    nifna, struct nx_pktq *, q, uint32_t, qlen, uint32_t, cnt);
	nx_pktq_unlock(q);
	return 0;
}

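/*
 * Accumulates the packet and byte counts of the processed queues into
 * *packets and *bytes. For KPKT_SC_UNSPEC all service classes are
 * counted; otherwise only the queue of the given class. Returns 0 if
 * any processed packets are queued, the original error otherwise.
 */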
static errno_t
nx_netif_tx_processed_pkt_get_len(struct nexus_netif_adapter *nifna,
    kern_packet_svc_class_t sc, uint32_t *packets, uint32_t *bytes,
    errno_t orig_err)
{
	struct nx_netif *nif = nifna->nifna_netif;
	struct nx_pktq *q;
	uint32_t qlen = 0;
	size_t qsize = 0;
	errno_t err = 0;
	int i;

	if (sc == KPKT_SC_UNSPEC) {
		for (i = KPKT_TC_MAX - 1; i >= 0; i--) {
			q = &nif->nif_tx_processed_pktq[i];
			nx_pktq_lock_spin(q);
			qlen += nx_pktq_len(q);
			qsize += nx_pktq_size(q);
			nx_pktq_unlock(q);
		}
	} else {
		if (!KPKT_VALID_SVC(sc)) {
			sc = KPKT_SC_BE;
		}
		i = PKT_SC2TC(sc);
		VERIFY(i >= 0 && i < KPKT_TC_MAX);
		q = &nif->nif_tx_processed_pktq[i];
		nx_pktq_lock_spin(q);
		qlen += nx_pktq_len(q);
		qsize += nx_pktq_size(q);
		nx_pktq_unlock(q);
	}
	if (packets != NULL) {
		*packets += qlen;
	}
	if (bytes != NULL) {
		*bytes += (uint32_t)qsize;
	}
	/* Original error takes precedence if we have no processed packets */
	if (qlen == 0) {
		err = orig_err;
	}

	DTRACE_SKYWALK6(processed__pkt__qlen, struct nexus_netif_adapter *,
	    nifna, struct nx_pktq *, q, uint32_t, qlen, size_t, qsize,
	    uint32_t, (packets != NULL) ? *packets : 0,
	    uint32_t, (bytes != NULL) ? *bytes : 0);
	return err;
}

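/*
 * Converts the dequeued packet chain into the classq return arguments
 * expected by the driver (head, tail, count, length) and picks the
 * error to return.
 */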
static void
fix_dequeue_pkt_return_args(struct __kern_packet *p_chain, classq_pkt_t *head,
    classq_pkt_t *tail, uint32_t *cnt, uint32_t *len, errno_t orig_err,
    errno_t *err)
{
	struct __kern_packet *__single p_tail = NULL;
	uint32_t c = 0, l = 0;

	nx_netif_pkt_chain_info(p_chain, &p_tail, &c, &l);
	if (head != NULL) {
		CLASSQ_PKT_INIT_PACKET(head, p_chain);
	}
	if (tail != NULL) {
		CLASSQ_PKT_INIT_PACKET(tail, p_tail);
	}
	if (cnt != NULL) {
		*cnt = c;
	}
	if (len != NULL) {
		*len = l;
	}

	*err = (p_chain == NULL) ? EAGAIN : 0;

	/*
	 * If we can't dequeue from either the AQM queue or the processed
	 * queue, the original (AQM queue) error takes precedence. If we can
	 * dequeue something, the original error is ignored. Most likely both
	 * errors are EAGAIN.
	 */
	if (*err != 0 && orig_err != 0) {
		*err = orig_err;
	}
}

/*
 * This is called after the driver has dequeued packets from AQM.
 * This callback is used for redirecting new packets to the filters and
 * processed packets back to the driver.
 */
errno_t
nx_netif_native_tx_dequeue(struct nexus_netif_adapter *nifna,
    uint32_t sc, uint32_t pkt_limit, uint32_t byte_limit,
    classq_pkt_t *head, classq_pkt_t *tail, uint32_t *cnt, uint32_t *len,
    boolean_t drvmgt, errno_t orig_err)
{
#pragma unused(drvmgt)
	struct nx_netif *nif = nifna->nifna_netif;
	errno_t err;
	struct __kern_packet *p_chain;

	if (__probable(nif->nif_filter_cnt == 0 &&
	    !NETIF_DEFAULT_DROP_ENABLED(nif))) {
		return orig_err;
	}
	if (head->cp_kpkt != NULL) {
		ASSERT(head->cp_ptype == QP_PACKET);
		/*
		 * Move new packets to the filters.
		 * TODO:
		 * The number of packets to move should depend on the
		 * available ring space of the next filter. The limits
		 * should be adjusted at ifclassq_dequeue().
		 */
		nx_netif_filter_tx_pkt_enqueue(nifna, head->cp_kpkt);
	}

	/*
	 * Move processed packets to the driver.
	 */
	p_chain = nx_netif_filter_tx_processed_pkt_dequeue(nifna, sc,
	    pkt_limit, byte_limit);

	fix_dequeue_pkt_return_args(p_chain, head, tail, cnt, len,
	    orig_err, &err);
	return err;
}

/*
 * This is called by the driver to get the ifnet queue length.
 * Since the processed queue is separate from the ifnet send queue, this count
 * needs to be retrieved separately and added to the ifnet send queue count.
 */
errno_t
nx_netif_native_tx_get_len(struct nexus_netif_adapter *nifna,
    uint32_t sc, uint32_t *packets, uint32_t *bytes,
    errno_t orig_err)
{
	struct nx_netif *nif = nifna->nifna_netif;

	if (__probable(nif->nif_filter_cnt == 0)) {
		return orig_err;
	}
	return nx_netif_tx_processed_pkt_get_len(nifna, sc, packets,
	           bytes, orig_err);
}