1 /*
2  * Copyright (c) 2015-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  *   1. Redistributions of source code must retain the above copyright
36  *      notice, this list of conditions and the following disclaimer.
37  *   2. Redistributions in binary form must reproduce the above copyright
38  *      notice, this list of conditions and the following disclaimer in the
39  *      documentation and/or other materials provided with the distribution.
40  *
41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
42  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
43  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
44  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
45  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
46  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
47  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
48  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
49  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
50  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
51  * SUCH DAMAGE.
52  */
53 
54 /*
55  *  BSD LICENSE
56  *
57  * Copyright(c) 2015 NEC Europe Ltd. All rights reserved.
58  *  All rights reserved.
59  *
60  * Redistribution and use in source and binary forms, with or without
61  *  modification, are permitted provided that the following conditions
62  *  are met:
63  *
64  *    * Redistributions of source code must retain the above copyright
65  *      notice, this list of conditions and the following disclaimer.
66  *    * Redistributions in binary form must reproduce the above copyright
67  *      notice, this list of conditions and the following disclaimer in
68  *      the documentation and/or other materials provided with the
69  *      distribution.
70  *    * Neither the name of NEC Europe Ltd. nor the names of
71  *      its contributors may be used to endorse or promote products derived
72  *      from this software without specific prior written permission.
73  *
74  *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
75  *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
76  *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
77  *  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
78  *  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
79  *  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
80  *  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
81  *  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
82  *  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
83  *  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
84  *  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
85  */
86 
87 #include <skywalk/os_skywalk_private.h>
88 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
89 #include <skywalk/nexus/flowswitch/fsw_var.h>
90 #include <skywalk/nexus/netif/nx_netif.h>
91 #include <skywalk/nexus/netif/nx_netif_compat.h>
92 #include <kern/sched_prim.h>
93 #include <sys/kdebug.h>
94 #include <sys/sdt.h>
95 #include <net/bpf.h>
96 #include <net/if_ports_used.h>
97 #include <net/pktap.h>
98 #include <net/pktsched/pktsched_netem.h>
99 #include <netinet/tcp.h>
100 #include <netinet/udp.h>
101 #include <netinet/ip.h>
102 #include <netinet/ip6.h>
103 
104 extern kern_return_t thread_terminate(thread_t);
105 
106 #define FSW_ZONE_MAX                  256
107 #define FSW_ZONE_NAME                 "skywalk.nx.fsw"
108 
109 static uint64_t fsw_reap_last __sk_aligned(8);
110 static uint64_t fsw_want_purge __sk_aligned(8);
111 
112 #define NX_FSW_FE_TABLESZ       256     /* some power of 2 */
113 static uint32_t fsw_fe_table_size = NX_FSW_FE_TABLESZ;
114 
115 #define NX_FSW_FOB_HASHSZ       31      /* some mersenne prime */
116 static uint32_t fsw_flow_owner_buckets = NX_FSW_FOB_HASHSZ;
117 
118 #define NX_FSW_FRB_HASHSZ       128     /* some power of 2 */
119 static uint32_t fsw_flow_route_buckets = NX_FSW_FRB_HASHSZ;
120 
121 #define NX_FSW_FRIB_HASHSZ      13      /* some prime */
122 static uint32_t fsw_flow_route_id_buckets = NX_FSW_FRIB_HASHSZ;
123 
124 #define NX_FSW_FLOW_REAP_INTERVAL 1     /* seconds */
125 static uint32_t fsw_flow_reap_interval = NX_FSW_FLOW_REAP_INTERVAL;
126 
127 #define NX_FSW_FLOW_PURGE_THRES 0       /* purge every N reaps (0 = disable) */
128 static uint32_t fsw_flow_purge_thresh = NX_FSW_FLOW_PURGE_THRES;
129 
130 #define FSW_REAP_IVAL            (MAX(1, fsw_flow_reap_interval))
131 #define FSW_REAP_SK_THRES        (FSW_REAP_IVAL << 5)
132 #define FSW_REAP_IF_THRES        (FSW_REAP_IVAL << 5)
133 #define FSW_DRAIN_CH_THRES       (FSW_REAP_IVAL << 5)
134 #define FSW_IFSTATS_THRES        1
135 
136 #define RX_BUFLET_BATCH_COUNT 64 /* max batch size for buflet allocation */
137 
138 uint32_t fsw_rx_batch = NX_FSW_RXBATCH; /* # of packets per batch (RX) */
139 uint32_t fsw_tx_batch = NX_FSW_TXBATCH; /* # of packets per batch (TX) */
140 #if (DEVELOPMENT || DEBUG)
141 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, rx_batch,
142     CTLFLAG_RW | CTLFLAG_LOCKED, &fsw_rx_batch, 0,
143     "flowswitch Rx batch size");
144 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, tx_batch,
145     CTLFLAG_RW | CTLFLAG_LOCKED, &fsw_tx_batch, 0,
146     "flowswitch Tx batch size");
147 #endif /* DEVELOPMENT || DEBUG */
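/*
 * Illustrative only: on a DEVELOPMENT/DEBUG kernel these knobs surface as
 * "kern.skywalk.flowswitch.rx_batch" and "kern.skywalk.flowswitch.tx_batch",
 * e.g. "sysctl kern.skywalk.flowswitch.rx_batch=32".  On RELEASE kernels the
 * compiled-in defaults above are used and no sysctl is registered here.
 */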
148 
149 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, rx_agg_tcp,
150     CTLFLAG_RW | CTLFLAG_LOCKED, &sk_fsw_rx_agg_tcp, 0,
151     "flowswitch RX aggregation for tcp flows (enable/disable)");
152 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, rx_agg_tcp_host,
153     CTLFLAG_RW | CTLFLAG_LOCKED, &sk_fsw_rx_agg_tcp_host, 0,
154     "flowswitch RX aggregation for tcp kernel path (0/1/2 (off/on/auto))");
155 
156 /*
157  * IP reassembly
158  * The "kern.skywalk.flowswitch.ip_reass" sysctl can be used to force
159  * enable/disable the reassembly routine regardless of whether the
160  * transport netagent is enabled or not.
161  *
162  * 'fsw_ip_reass' is a tri-state:
163  *    0 means force IP reassembly off
164  *    1 means force IP reassembly on
165  *    2 means don't force the value, use what's appropriate for this flowswitch
166  */
167 #define FSW_IP_REASS_FORCE_OFF          0
168 #define FSW_IP_REASS_FORCE_ON           1
169 #define FSW_IP_REASS_NO_FORCE           2
170 
171 uint32_t fsw_ip_reass = FSW_IP_REASS_NO_FORCE;
172 
173 static int
174 fsw_ip_reass_sysctl SYSCTL_HANDLER_ARGS
175 {
176 #pragma unused(oidp, arg1, arg2)
177 	unsigned int new_value;
178 	int changed;
179 	int error;
180 
181 	error = sysctl_io_number(req, fsw_ip_reass, sizeof(fsw_ip_reass),
182 	    &new_value, &changed);
183 	if (error == 0 && changed != 0) {
184 		if (new_value > FSW_IP_REASS_NO_FORCE) {
185 			return EINVAL;
186 		}
187 		fsw_ip_reass = new_value;
188 	}
189 	return error;
190 }
191 
192 SYSCTL_PROC(_kern_skywalk_flowswitch, OID_AUTO, ip_reass,
193     CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
194     0, 0, fsw_ip_reass_sysctl, "IU",
195     "adjust flowswitch IP reassembly");
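/*
 * Illustrative usage: "sysctl kern.skywalk.flowswitch.ip_reass=1" forces
 * reassembly on regardless of the transport netagent state; values outside
 * 0..2 are rejected with EINVAL by fsw_ip_reass_sysctl() above.
 */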
196 
197 #if (DEVELOPMENT || DEBUG)
198 static uint64_t _fsw_inject_error = 0;
199 #define _FSW_INJECT_ERROR(_en, _ev, _ec, _f, ...) \
200 	_SK_INJECT_ERROR(_fsw_inject_error, _en, _ev, _ec, \
201 	&FSW_STATS_VAL(_FSW_STATS_ERROR_INJECTIONS), _f, __VA_ARGS__)
202 
203 #define _FSW_INJECT_ERROR_SET(_en, _f, ...) do { \
204 	if (__improbable(((_fsw_inject_error) & (1ULL << (_en))) != 0)) { \
205 	        SK_DF(SK_VERB_ERROR_INJECT, "injecting error %d", (_en));\
206 	        if ((_f) != NULL)                                       \
207 	                (_f)(__VA_ARGS__);                              \
208 	}                                                               \
209 } while (0)
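/*
 * _fsw_inject_error is a bitmask: bit N arms injection point N (see
 * _FSW_INJECT_ERROR_SET above).  For example, setting bit 2 via the
 * "fsw_inject_error" sysctl below targets injection point 2 in
 * rx_lookup_flow(), which then treats the flow lookup as a miss.
 */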
210 
211 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, flow_owner_buckets,
212     CTLFLAG_RW | CTLFLAG_LOCKED, &fsw_flow_owner_buckets, 0, "");
213 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, fe_table_size,
214     CTLFLAG_RW | CTLFLAG_LOCKED, &fsw_fe_table_size, 0, "");
215 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, flow_route_buckets,
216     CTLFLAG_RW | CTLFLAG_LOCKED, &fsw_flow_route_buckets, 0, "");
217 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO,
218     flow_route_id_buckets, CTLFLAG_RW | CTLFLAG_LOCKED,
219     &fsw_flow_route_id_buckets, 0, "");
220 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, flow_reap_interval,
221     CTLFLAG_RW | CTLFLAG_LOCKED, &fsw_flow_reap_interval, 0, "");
222 SYSCTL_UINT(_kern_skywalk_flowswitch, OID_AUTO, flow_purge_thresh,
223     CTLFLAG_RW | CTLFLAG_LOCKED, &fsw_flow_purge_thresh, 0, "");
224 SYSCTL_QUAD(_kern_skywalk_flowswitch, OID_AUTO, fsw_inject_error,
225     CTLFLAG_RW | CTLFLAG_LOCKED, &_fsw_inject_error, "");
226 #else
227 #define _FSW_INJECT_ERROR(_en, _ev, _ec, _f, ...) do { } while (0)
228 #define _FSW_INJECT_ERROR_SET(_en, _f, ...) do { } while (0)
229 #endif /* !DEVELOPMENT && !DEBUG */
230 
231 static void fsw_linger_remove_internal(struct flow_entry_linger_head *,
232     struct flow_entry *);
233 static void fsw_reap_thread_func(void *, wait_result_t);
234 static void fsw_reap_thread_cont(void *, wait_result_t);
235 static void fsw_purge_cache(struct nx_flowswitch *, boolean_t);
236 static void fsw_drain_channels(struct nx_flowswitch *, uint64_t, boolean_t);
237 static uint32_t fsw_process_deferred(struct nx_flowswitch *);
238 static uint32_t fsw_process_linger(struct nx_flowswitch *, uint32_t *);
239 
240 static int copy_packet_from_dev(struct nx_flowswitch *, struct __kern_packet *,
241     struct __kern_packet *);
242 
243 static void fsw_ifp_inc_traffic_class_in_pkt(struct ifnet *, kern_packet_t);
244 static void fsw_ifp_inc_traffic_class_out_pkt(struct ifnet *, uint32_t,
245     uint32_t, uint32_t);
246 
247 static int __fsw_dp_inited = 0;
248 
249 int
250 fsw_dp_init(void)
251 {
252 	_CASSERT(FSW_VP_DEV == 0);
253 	_CASSERT(FSW_VP_HOST == 1);
254 	_CASSERT((FSW_VP_HOST + FSW_VP_DEV) < FSW_VP_USER_MIN);
255 	_CASSERT((FSW_VP_HOST + FSW_VP_DEV) < NEXUS_PORT_FLOW_SWITCH_CLIENT);
256 
257 	ASSERT(!__fsw_dp_inited);
258 
259 	flow_mgr_init();
260 	flow_init();
261 
262 	__fsw_dp_inited = 1;
263 
264 	return 0;
265 }
266 
267 void
268 fsw_dp_uninit(void)
269 {
270 	if (__fsw_dp_inited) {
271 		flow_fini();
272 		flow_mgr_fini();
273 
274 		__fsw_dp_inited = 0;
275 	}
276 }
277 
278 static void
279 dp_free_pktq(struct nx_flowswitch *fsw __sk_unused, struct pktq *pktq)
280 {
281 	pp_free_pktq(pktq);
282 }
283 
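/*
 * dp_drop_pktq() is deliberately a macro rather than a function: the early
 * "return" below exits the *caller* when the queue is already empty;
 * otherwise the drops are counted and the whole queue is freed.
 */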
284 #define dp_drop_pktq(fsw, pktq) do { \
285 	uint32_t _len = KPKTQ_LEN(pktq); \
286 	if (KPKTQ_EMPTY(pktq)) { \
287 	        ASSERT(_len == 0); \
288 	        return; \
289 	} \
290 	SK_DF(SK_VERB_FSW_DP | SK_VERB_DROP, "drop %d packets", _len); \
291 	FSW_STATS_ADD(FSW_STATS_DROP, _len); \
292 	DTRACE_SKYWALK1(fsw__dp__drop, int, _len); \
293 	dp_free_pktq(fsw, pktq); \
294 } while (0)
295 
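/*
 * Mirror the flow entry's pending Rx or Tx packet queue to any attached
 * pktap instances, attributing packets to the owning (and effective)
 * process.  Flows bound to the host port are only tapped here early for
 * inbound TCP; all other host-path traffic is tapped in the DLIL input
 * path instead.
 */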
296 SK_NO_INLINE_ATTRIBUTE
297 void
298 fsw_snoop(struct nx_flowswitch *fsw, struct flow_entry *fe, bool input)
299 {
300 	pid_t pid;
301 	char proc_name_buf[FLOW_PROCESS_NAME_LENGTH];
302 	char *proc_name = NULL;
303 	pid_t epid;
304 	char eproc_name_buf[FLOW_PROCESS_NAME_LENGTH];
305 	char *eproc_name = NULL;
306 	sa_family_t af;
307 	bool tap_early = false;
308 	struct __kern_packet *pkt;
309 
310 	ASSERT(fe != NULL);
311 	ASSERT(fsw->fsw_ifp != NULL);
312 
313 	if (fe->fe_nx_port == FSW_VP_HOST) {
314 		/* allow packets to be tapped before aggregation happens */
315 		tap_early = (input && fe->fe_key.fk_proto == IPPROTO_TCP);
316 		if (!tap_early) {
317 			/* all other traffic will be tapped in the dlil input path */
318 			return;
319 		}
320 	}
321 	if (fe->fe_key.fk_ipver == IPVERSION) {
322 		af = AF_INET;
323 	} else if (fe->fe_key.fk_ipver == IPV6_VERSION) {
324 		af = AF_INET6;
325 	} else {
326 		return;
327 	}
328 
329 	pid = fe->fe_pid;
330 	if (fe->fe_proc_name[0] != '\0') {
331 		(void) strlcpy(proc_name_buf, fe->fe_proc_name,
332 		    sizeof(proc_name_buf));
333 		proc_name = proc_name_buf;
334 	}
335 	epid = fe->fe_epid;
336 	if (fe->fe_eproc_name[0] != '\0') {
337 		(void) strlcpy(eproc_name_buf, fe->fe_eproc_name,
338 		    sizeof(eproc_name_buf));
339 		eproc_name = eproc_name_buf;
340 	}
341 	if (input) {
342 		KPKTQ_FOREACH(pkt, &fe->fe_rx_pktq) {
343 			pktap_input_packet(fsw->fsw_ifp, af,
344 			    fsw->fsw_ifp_dlt, pid, proc_name, epid,
345 			    eproc_name, SK_PKT2PH(pkt), NULL, 0,
346 			    IPPROTO_TCP, fe->fe_flowid,
347 			    tap_early ? PTH_FLAG_SOCKET: PTH_FLAG_NEXUS_CHAN);
348 		}
349 	} else {
350 		KPKTQ_FOREACH(pkt, &fe->fe_tx_pktq) {
351 			pktap_output_packet(fsw->fsw_ifp, af,
352 			    fsw->fsw_ifp_dlt, pid, proc_name, epid,
353 			    eproc_name, SK_PKT2PH(pkt), NULL, 0,
354 			    0, 0, PTH_FLAG_NEXUS_CHAN);
355 		}
356 	}
357 }
358 
359 #if (DEVELOPMENT || DEBUG)
360 static void
361 _fsw_error35_handler(int step, struct flow_route *fr, struct __kern_packet *pkt,
362     int *ret)
363 {
364 	static boolean_t _err35_flag_modified = FALSE;
365 
366 	switch (step) {
367 	case 1:
368 		if ((fr->fr_flags & (FLOWRTF_RESOLVED | FLOWRTF_HAS_LLINFO)) ==
369 		    (FLOWRTF_RESOLVED | FLOWRTF_HAS_LLINFO)) {
370 			fr->fr_flags &= ~FLOWRTF_RESOLVED;
371 			_err35_flag_modified = TRUE;
372 		}
373 		break;
374 
375 	case 2:
376 		if (!_err35_flag_modified) {
377 			return;
378 		}
379 		if (pkt->pkt_pflags & PKT_F_MBUF_DATA) {
380 			m_freem(pkt->pkt_mbuf);
381 			pkt->pkt_pflags &= ~PKT_F_MBUF_DATA;
382 			pkt->pkt_mbuf = NULL;
383 		}
384 		*ret = EJUSTRETURN;
385 		fr->fr_flags |= FLOWRTF_RESOLVED;
386 		_err35_flag_modified = FALSE;
387 		break;
388 
389 	default:
390 		VERIFY(0);
391 		/* not reached */
392 	}
393 }
394 
395 static void
396 _fsw_error36_handler(int step, struct flow_route *fr, int *ret)
397 {
398 	static boolean_t _err36_flag_modified = FALSE;
399 
400 	switch (step) {
401 	case 1:
402 		if ((fr->fr_flags & (FLOWRTF_RESOLVED | FLOWRTF_HAS_LLINFO)) ==
403 		    (FLOWRTF_RESOLVED | FLOWRTF_HAS_LLINFO)) {
404 			fr->fr_flags &= ~FLOWRTF_RESOLVED;
405 			_err36_flag_modified = TRUE;
406 		}
407 		break;
408 
409 	case 2:
410 		if (!_err36_flag_modified) {
411 			return;
412 		}
413 		*ret = ENETUNREACH;
414 		fr->fr_flags |= FLOWRTF_RESOLVED;
415 		_err36_flag_modified = FALSE;
416 		break;
417 
418 	default:
419 		VERIFY(0);
420 		/* not reached */
421 	}
422 }
423 #else /* !DEVELOPMENT && !DEBUG */
424 #define _fsw_error35_handler(...)
425 #define _fsw_error36_handler(...)
426 #endif /* DEVELOPMENT || DEBUG */
427 
428 /*
429  * Check if the source packet content can fit into the destination
430  * ring's packet. Returns TRUE if the source packet can fit.
431  * Note: Failures could be caused by misconfigured packet pool sizes,
432  * a missing packet size check against MTU, or if the source packet is from
433  * a compat netif and the attached mbuf is larger than MTU due to LRO.
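 * Illustrative example (assuming a 2048-byte default buflet size): a 7900
 * byte source payload fits a destination packet carrying 4 buflets with no
 * headroom (7900 <= 4 * 2048), but not once a 512-byte headroom is
 * reserved (7900 > 8192 - 512).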
434  */
435 static inline boolean_t
436 validate_pkt_len(struct __kern_packet *spkt, kern_packet_t dph,
437     uint32_t skip_l2hlen, uint32_t l2hlen, uint16_t headroom,
438     uint32_t *copy_len)
439 {
440 	uint32_t tlen = 0;
441 	uint32_t splen = spkt->pkt_length - skip_l2hlen;
442 
443 	if (l2hlen != 0) {
444 		VERIFY(skip_l2hlen == 0);
445 		tlen += l2hlen;
446 	} else if ((spkt->pkt_link_flags & PKT_LINKF_ETHFCS) != 0) {
447 		splen -= ETHER_CRC_LEN;
448 	}
449 
450 	tlen += splen;
451 	*copy_len = splen;
452 
453 	return tlen <= ((__packet_get_buflet_count(dph) *
454 	       PP_BUF_SIZE_DEF(SK_PTR_ADDR_KPKT(dph)->pkt_qum.qum_pp)) -
455 	       headroom);
456 }
457 
458 #if SK_LOG
459 /* Hoisted out of line to reduce kernel stack footprint */
460 SK_LOG_ATTRIBUTE
461 static void
462 copy_packet_from_dev_log(struct __kern_packet *spkt,
463     struct __kern_packet *dpkt, struct proc *p)
464 {
465 	uint64_t logflags = ((SK_VERB_FSW | SK_VERB_RX) |
466 	    ((spkt->pkt_pflags & PKT_F_MBUF_DATA) ?
467 	    SK_VERB_COPY_MBUF : SK_VERB_COPY));
468 	char *daddr;
469 	MD_BUFLET_ADDR_ABS(dpkt, daddr);
470 	SK_DF(logflags, "%s(%d) splen %u dplen %u hr %u l2 %u",
471 	    sk_proc_name_address(p), sk_proc_pid(p), spkt->pkt_length,
472 	    dpkt->pkt_length, (uint32_t)dpkt->pkt_headroom,
473 	    (uint32_t)dpkt->pkt_l2_len);
474 	SK_DF(logflags | SK_VERB_DUMP, "%s",
475 	    sk_dump("buf", daddr, dpkt->pkt_length, 128, NULL, 0));
476 }
477 #else
478 #define copy_packet_from_dev_log(...)
479 #endif /* SK_LOG */
480 
481 
482 static inline int
483 copy_packet_from_dev(struct nx_flowswitch *fsw, struct __kern_packet *spkt,
484     struct __kern_packet *dpkt)
485 {
486 	/*
487 	 * The source and destination nexus don't share a packet pool, so the
488 	 * sync operation here is to:
489 	 * - alloc packet for the rx(dst) ring
490 	 * - copy data/metadata from src packet to dst packet
491 	 * - attach alloc'd packet to rx(dst) ring
492 	 */
493 	kern_packet_t dph = SK_PTR_ENCODE(dpkt,
494 	    METADATA_TYPE(dpkt), METADATA_SUBTYPE(dpkt));
495 	kern_packet_t sph = SK_PTR_ENCODE(spkt, METADATA_TYPE(spkt),
496 	    METADATA_SUBTYPE(spkt));
497 	boolean_t do_cksum_rx;
498 	uint16_t skip_l2h_len = spkt->pkt_l2_len;
499 	uint16_t iphlen;
500 	uint32_t dlen;
501 	int err;
502 
503 	if (__improbable(!validate_pkt_len(spkt, dph, skip_l2h_len, 0, 0,
504 	    &dlen))) {
505 		SK_ERR("bufcnt %d, bufsz %d", __packet_get_buflet_count(dph),
506 		    PP_BUF_SIZE_DEF(dpkt->pkt_qum.qum_pp));
507 		FSW_STATS_INC(FSW_STATS_RX_COPY_BAD_LEN);
508 		return EINVAL;
509 	}
510 
511 	/* Copy packet metadata */
512 	_QUM_COPY(&(spkt)->pkt_qum, &(dpkt)->pkt_qum);
513 	_PKT_COPY(spkt, dpkt);
514 	ASSERT(!(dpkt->pkt_qum.qum_qflags & QUM_F_KERNEL_ONLY) ||
515 	    PP_KERNEL_ONLY(dpkt->pkt_qum.qum_pp));
516 	ASSERT(dpkt->pkt_mbuf == NULL);
517 
518 	dpkt->pkt_headroom = 0;
519 	dpkt->pkt_l2_len = 0;
520 
521 	/* don't include IP header from partial sum */
522 	if (__probable((spkt->pkt_qum_qflags & QUM_F_FLOW_CLASSIFIED) != 0)) {
523 		iphlen = spkt->pkt_flow_ip_hlen;
524 		do_cksum_rx = sk_cksum_rx;
525 	} else {
526 		iphlen = 0;
527 		do_cksum_rx = FALSE;
528 	}
529 
530 	/* Copy packet payload */
531 	if ((spkt->pkt_pflags & PKT_F_MBUF_DATA) &&
532 	    (spkt->pkt_pflags & PKT_F_TRUNCATED)) {
533 		FSW_STATS_INC(FSW_STATS_RX_COPY_MBUF2PKT);
534 		/*
535 		 * Source packet has truncated contents (just enough for
536 		 * the classifier) of an mbuf from the compat driver; copy
537 		 * the entire mbuf contents to the destination packet.
538 		 */
539 		m_adj(spkt->pkt_mbuf, skip_l2h_len);
540 		ASSERT((uint32_t)m_pktlen(spkt->pkt_mbuf) >= dlen);
541 		fsw->fsw_pkt_copy_from_mbuf(NR_RX, dph, 0,
542 		    spkt->pkt_mbuf, 0, dlen, do_cksum_rx, iphlen);
543 	} else {
544 		FSW_STATS_INC(FSW_STATS_RX_COPY_PKT2PKT);
545 		/*
546 		 * Source packet has full contents, either from an mbuf
547 		 * that came up from the compat driver, or because it
548 		 * originated on the native driver; copy to destination.
549 		 */
550 		fsw->fsw_pkt_copy_from_pkt(NR_RX, dph, 0, sph,
551 		    (spkt->pkt_headroom + spkt->pkt_l2_len), dlen, do_cksum_rx,
552 		    iphlen, 0, FALSE);
553 	}
554 
555 #if DEBUG || DEVELOPMENT
556 	if (__improbable(pkt_trailers > 0)) {
557 		dlen += pkt_add_trailers(dph, dlen, iphlen);
558 	}
559 #endif /* DEBUG || DEVELOPMENT */
560 
561 	/* Finalize and attach packet to Rx ring */
562 	METADATA_ADJUST_LEN(dpkt, 0, 0);
563 	err = __packet_finalize(dph);
564 	VERIFY(err == 0);
565 
566 	copy_packet_from_dev_log(spkt, dpkt, kernproc);
567 
568 	if (spkt->pkt_pflags & PKT_F_MBUF_DATA) {
569 		ifp_inc_traffic_class_in(fsw->fsw_ifp, spkt->pkt_mbuf);
570 		mbuf_free(spkt->pkt_mbuf);
571 		KPKT_CLEAR_MBUF_DATA(spkt);
572 	} else {
573 		fsw_ifp_inc_traffic_class_in_pkt(fsw->fsw_ifp, dph);
574 	}
575 
576 	if (__probable(do_cksum_rx != 0)) {
577 		FSW_STATS_INC(FSW_STATS_RX_COPY_SUM);
578 	}
579 
580 	return 0;
581 }
582 
583 SK_NO_INLINE_ATTRIBUTE
584 static struct __kern_packet *
585 rx_process_ip_frag(struct nx_flowswitch *fsw, struct __kern_packet *pkt)
586 {
587 	char *pkt_buf;
588 	void *l3_hdr;
589 	uint16_t nfrags, tlen;
590 	int err = 0;
591 
592 	switch (fsw_ip_reass) {
593 	case FSW_IP_REASS_FORCE_OFF:
594 		return pkt;
595 	case FSW_IP_REASS_FORCE_ON:
596 		break;
597 	default:
598 		if (!FSW_NETAGENT_ENABLED(fsw) ||
599 		    flow_mgr_get_num_flows(fsw->fsw_flow_mgr) == 0) {
600 			return pkt;
601 		}
602 		break;
603 	}
604 
605 	MD_BUFLET_ADDR_ABS(pkt, pkt_buf);
606 	l3_hdr = pkt_buf + pkt->pkt_headroom + pkt->pkt_l2_len;
607 
608 	ASSERT(fsw->fsw_ipfm != NULL);
609 	ASSERT((pkt->pkt_qum_qflags & QUM_F_FLOW_CLASSIFIED) != 0);
610 
611 	if (pkt->pkt_flow_ip_ver == IPVERSION) {
612 		err = fsw_ip_frag_reass_v4(fsw->fsw_ipfm, &pkt,
613 		    (struct ip *)l3_hdr, &nfrags, &tlen);
614 	} else {
615 		ASSERT(pkt->pkt_flow_ip_ver == IPV6_VERSION);
616 		/* we only handle frag header immediately after v6 header */
617 		err = fsw_ip_frag_reass_v6(fsw->fsw_ipfm, &pkt,
618 		    (struct ip6_hdr *)l3_hdr,
619 		    (struct ip6_frag *)((uintptr_t)l3_hdr + sizeof(struct ip6_hdr)),
620 		    &nfrags, &tlen);
621 	}
622 	if (__improbable(err != 0)) {
623 		/* if we get a bad fragment, free it */
624 		pp_free_packet_single(pkt);
625 		pkt = NULL;
626 	} else {
627 		ASSERT(!((pkt != NULL) ^ (nfrags > 0)));
628 	}
629 
630 	return pkt;
631 }
632 
633 SK_NO_INLINE_ATTRIBUTE
634 static void
635 rx_prepare_packet_mbuf(struct nx_flowswitch *fsw, struct __kern_packet *pkt)
636 {
637 	ASSERT(pkt->pkt_pflags & PKT_F_MBUF_DATA);
638 	uint32_t mlen = (uint32_t)m_pktlen(pkt->pkt_mbuf);
639 	kern_packet_t ph =  SK_PTR_ENCODE(pkt,
640 	    METADATA_TYPE(pkt), METADATA_SUBTYPE(pkt));
641 	/*
642 	 * This is the case when the packet is coming in from
643 	 * compat-netif. This packet only has valid metadata
644 	 * and an attached mbuf. We need to copy enough data
645 	 * from the mbuf to the packet buffer for the
646 	 * classifier. Compat netif packet pool is configured
647 	 * with buffer size of NETIF_COMPAT_MAX_MBUF_DATA_COPY
648 	 * which is just enough to hold the protocol headers
649 	 * for the flowswitch classifier.
650 	 */
651 
652 	pkt->pkt_headroom = 0;
653 	METADATA_ADJUST_LEN(pkt, 0, 0);
654 	/*
655 	 * Copy the initial 128 bytes of the packet for
656 	 * classification.
657 	 * Ethernet(14) + IPv6 header(40) +
658 	 * + IPv6 fragment header(8) +
659 	 * TCP header with options(60).
660 	 */
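	 * (These sum to 122 bytes, which fits within the
	 * NETIF_COMPAT_MAX_MBUF_DATA_COPY-sized copy done below.)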
661 	fsw->fsw_pkt_copy_from_mbuf(NR_RX, ph,
662 	    pkt->pkt_headroom, pkt->pkt_mbuf, 0,
663 	    MIN(mlen, NETIF_COMPAT_MAX_MBUF_DATA_COPY),
664 	    FALSE, 0);
665 
666 	int err = __packet_finalize_with_mbuf(pkt);
667 	VERIFY(err == 0);
668 }
669 
670 static struct __kern_packet *
671 rx_prepare_packet(struct nx_flowswitch *fsw, struct __kern_packet *pkt)
672 {
673 	pkt->pkt_qum_qflags &= ~QUM_F_FLOW_CLASSIFIED;
674 
675 	if (__improbable(pkt->pkt_pflags & PKT_F_MBUF_DATA)) {
676 		rx_prepare_packet_mbuf(fsw, pkt);
677 	}
678 
679 	return pkt;
680 }
681 
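/*
 * Find the flow entry for a classified packet.  As a fast path, prev_fe
 * (the entry matched by the previous packet of this batch) is reused when
 * its 5-tuple key matches, avoiding a flow-table lookup for back-to-back
 * packets of the same flow.  For parent/child flows the result is then
 * refined: on Rx via the child demux match, on Tx via the packet's flow id.
 * Returns an entry with a reference held, or NULL.
 */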
682 static struct flow_entry *
683 lookup_flow_with_pkt(struct nx_flowswitch *fsw, struct __kern_packet *pkt,
684     bool input, struct flow_entry *prev_fe)
685 {
686 	struct flow_key key __sk_aligned(16);
687 	struct flow_entry *fe = NULL;
688 
689 	ASSERT(pkt->pkt_qum_qflags & QUM_F_FLOW_CLASSIFIED);
690 	flow_pkt2key(pkt, input, &key);
691 
692 	if (__probable(prev_fe != NULL &&
693 	    prev_fe->fe_key.fk_mask == FKMASK_5TUPLE)) {
694 		uint16_t saved_mask = key.fk_mask;
695 		key.fk_mask = FKMASK_5TUPLE;
696 		if (flow_key_cmp_mask(&prev_fe->fe_key, &key, &fk_mask_5tuple) == 0) {
697 			flow_entry_retain(prev_fe);
698 			fe = prev_fe;
699 		} else {
700 			key.fk_mask = saved_mask;
701 		}
702 	}
703 
704 top:
705 	if (__improbable(fe == NULL)) {
706 		fe = flow_mgr_find_fe_by_key(fsw->fsw_flow_mgr, &key);
707 	}
708 
709 	if (__improbable(fe != NULL &&
710 	    (fe->fe_flags & (FLOWENT_PARENT | FLOWENT_CHILD)) != 0)) {
711 		/* Rx */
712 		if (input) {
713 			if (fe->fe_flags & FLOWENT_PARENT) {
714 				struct flow_entry *child_fe = rx_lookup_child_flow(fsw, fe, pkt);
715 				if (child_fe != NULL) {
716 					flow_entry_release(&fe);
717 					fe = child_fe;
718 				}
719 			} else {
720 				if (!rx_flow_demux_match(fsw, fe, pkt)) {
721 					flow_entry_release(&fe);
722 					fe = NULL;
723 					goto top;
724 				}
725 			}
726 		} else {
727 			/* Tx */
728 			if (__improbable(!_UUID_MATCH(pkt->pkt_flow_id, fe->fe_uuid))) {
729 				if (__probable(fe->fe_flags & FLOWENT_PARENT)) {
730 					struct flow_entry *parent_fe = fe;
731 					fe = tx_lookup_child_flow(parent_fe, pkt->pkt_flow_id);
732 					flow_entry_release(&parent_fe);
733 				} else {
734 					flow_entry_release(&fe);
735 					fe = NULL;
736 					goto top;
737 				}
738 			}
739 		}
740 	}
741 
742 	SK_LOG_VAR(char fkbuf[FLOWKEY_DBGBUF_SIZE]);
743 	SK_DF(SK_VERB_FSW_DP | SK_VERB_LOOKUP,
744 	    "%s %s %s \"%s\" fe 0x%llx",
745 	    input ? "Rx" : "Tx", if_name(fsw->fsw_ifp),
746 	    sk_proc_name_address(current_proc()),
747 	    fk_as_string(&key, fkbuf, sizeof(fkbuf)),
748 	    SK_KVA(fe));
749 
750 	return fe;
751 }
752 
753 static struct flow_entry *
754 rx_lookup_flow(struct nx_flowswitch *fsw, struct __kern_packet *pkt,
755     struct flow_entry *prev_fe)
756 {
757 	struct flow_entry *fe;
758 
759 	fe = lookup_flow_with_pkt(fsw, pkt, true, prev_fe);
760 	_FSW_INJECT_ERROR(2, fe, NULL, flow_entry_release, &fe);
761 	if (fe == NULL) {
762 		FSW_STATS_INC(FSW_STATS_RX_FLOW_NOT_FOUND);
763 		return NULL;
764 	}
765 
766 	if (__improbable(fe->fe_flags & FLOWENTF_TORN_DOWN)) {
767 		FSW_STATS_INC(FSW_STATS_RX_FLOW_TORNDOWN);
768 		SK_DF(SK_VERB_FSW_DP | SK_VERB_RX | SK_VERB_FLOW,
769 		    "Rx flow torn down");
770 		flow_entry_release(&fe);
771 		fe = NULL;
772 	}
773 
774 	return fe;
775 }
776 
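/*
 * Queue a packet onto its flow entry's Rx batch.  The first packet for a
 * given entry inserts the entry into the pending list "fes" and keeps the
 * reference taken at lookup time; subsequent packets drop their extra
 * reference since the list already holds one.  The Tx variant below
 * follows the same scheme and additionally records fragment continuation
 * state.
 */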
777 static inline void
778 rx_flow_batch_packet(struct flow_entry_list *fes, struct flow_entry *fe,
779     struct __kern_packet *pkt)
780 {
781 	if (__improbable(pkt->pkt_flow_ip_is_frag)) {
782 		fe->fe_rx_frag_count++;
783 	}
784 
785 	/* KPKTQ_ENQUEUE_LIST is needed until frags become chained buflets */
786 	if (KPKTQ_EMPTY(&fe->fe_rx_pktq)) {
787 		ASSERT(KPKTQ_LEN(&fe->fe_rx_pktq) == 0);
788 		TAILQ_INSERT_TAIL(fes, fe, fe_rx_link);
789 		KPKTQ_ENQUEUE_LIST(&fe->fe_rx_pktq, pkt);
790 	} else {
791 		ASSERT(!TAILQ_EMPTY(fes));
792 		KPKTQ_ENQUEUE_LIST(&fe->fe_rx_pktq, pkt);
793 		flow_entry_release(&fe);
794 	}
795 }
796 
797 static void
798 tx_flow_batch_packet(struct flow_entry_list *fes, struct flow_entry *fe,
799     struct __kern_packet *pkt)
800 {
801 	/* record frag continuation */
802 	if (__improbable(pkt->pkt_flow_ip_is_first_frag)) {
803 		ASSERT(pkt->pkt_flow_ip_is_frag);
804 		fe->fe_tx_is_cont_frag = true;
805 		fe->fe_tx_frag_id = pkt->pkt_flow_ip_frag_id;
806 	} else if (__probable(!pkt->pkt_flow_ip_is_frag)) {
807 		fe->fe_tx_is_cont_frag = false;
808 		fe->fe_tx_frag_id = 0;
809 	}
810 
811 	if (KPKTQ_EMPTY(&fe->fe_tx_pktq)) {
812 		ASSERT(KPKTQ_LEN(&fe->fe_tx_pktq) == 0);
813 		TAILQ_INSERT_TAIL(fes, fe, fe_tx_link);
814 		KPKTQ_ENQUEUE(&fe->fe_tx_pktq, pkt);
815 	} else {
816 		ASSERT(!TAILQ_EMPTY(fes));
817 		KPKTQ_ENQUEUE(&fe->fe_tx_pktq, pkt);
818 		flow_entry_release(&fe);
819 	}
820 }
821 
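/*
 * Detach up to n_pkts_max packets from the ring slots in
 * [ckr_khead, ckr_rhead), drop those marked QUM_F_DROPPED or of zero
 * length, and collect the rest (along with their total byte count) into
 * pktq; ckr_khead/ckr_ktail are then updated to account for the consumed
 * slots.
 */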
822 static inline void
823 fsw_ring_dequeue_pktq(struct nx_flowswitch *fsw, struct __kern_channel_ring *r,
824     uint32_t n_pkts_max, struct pktq *pktq, uint32_t *n_bytes)
825 {
826 	uint32_t n_pkts = 0;
827 
828 	KPKTQ_INIT(pktq);
829 
830 	slot_idx_t idx, idx_end;
831 	idx = r->ckr_khead;
832 	idx_end = r->ckr_rhead;
833 	struct nexus_vp_adapter *vpna = VPNA(KRNA(r));
834 
835 	*n_bytes = 0;
836 	for (; n_pkts < n_pkts_max && idx != idx_end;
837 	    idx = SLOT_NEXT(idx, r->ckr_lim)) {
838 		struct __kern_slot_desc *ksd = KR_KSD(r, idx);
839 		struct __kern_packet *pkt = ksd->sd_pkt;
840 
841 		ASSERT(pkt->pkt_nextpkt == NULL);
842 		KR_SLOT_DETACH_METADATA(r, ksd);
843 
844 		_FSW_INJECT_ERROR(20, pkt->pkt_qum_qflags,
845 		    pkt->pkt_qum_qflags | QUM_F_DROPPED, null_func);
846 		if (__improbable(((pkt->pkt_qum_qflags & QUM_F_DROPPED) != 0))
847 		    || (pkt->pkt_length == 0)) {
848 			FSW_STATS_INC(FSW_STATS_DROP);
849 			pp_free_packet_single(pkt);
850 			continue;
851 		}
852 		if (NA_CHANNEL_EVENT_ATTACHED(&vpna->vpna_up)) {
853 			__packet_set_tx_nx_port(SK_PKT2PH(pkt),
854 			    vpna->vpna_nx_port, vpna->vpna_gencnt);
855 		}
856 
857 		n_pkts++;
858 		*n_bytes += pkt->pkt_length;
859 
860 		KPKTQ_ENQUEUE(pktq, pkt);
861 	}
862 
863 	r->ckr_khead = idx;
864 	r->ckr_ktail = SLOT_PREV(idx, r->ckr_lim);
865 }
866 
867 static void
868 fsw_ring_enqueue_pktq(struct nx_flowswitch *fsw, struct __kern_channel_ring *r,
869     struct pktq *pktq)
870 {
871 #pragma unused(fsw)
872 	struct __kern_packet *pkt;
873 	struct __kern_quantum *kqum;
874 	uint32_t kr_space_avail = 0;
875 	uint32_t n, n_pkts = 0, n_bytes = 0;
876 	slot_idx_t idx = 0, idx_start = 0, idx_end = 0;
877 
878 	kr_enter(r, TRUE);
879 
880 	idx_start = r->ckr_ktail;
881 	kr_space_avail = kr_available_slots_rxring(r);
882 	_FSW_INJECT_ERROR(40, kr_space_avail, 0, null_func);
883 	n = MIN(kr_space_avail, KPKTQ_LEN(pktq));
884 	_FSW_INJECT_ERROR(41, n, 0, null_func);
885 	idx_end = SLOT_INCREMENT(idx_start, n, r->ckr_lim);
886 
887 	idx = idx_start;
888 	while (idx != idx_end) {
889 		KPKTQ_DEQUEUE(pktq, pkt);
890 		kqum = SK_PTR_ADDR_KQUM(pkt);
891 		kqum->qum_qflags |= QUM_F_FINALIZED;
892 		n_pkts++;
893 		n_bytes += pkt->pkt_length;
894 		KR_SLOT_ATTACH_METADATA(r, KR_KSD(r, idx), kqum);
895 		if (__improbable(pkt->pkt_trace_id != 0)) {
896 			KDBG(SK_KTRACE_PKT_RX_FSW | DBG_FUNC_END, pkt->pkt_trace_id);
897 			KDBG(SK_KTRACE_PKT_RX_CHN | DBG_FUNC_START, pkt->pkt_trace_id);
898 		}
899 		idx = SLOT_NEXT(idx, r->ckr_lim);
900 	}
901 
902 	kr_update_stats(r, n_pkts, n_bytes);
903 
904 	/*
905 	 * ensure slot attachments are visible before updating the
906 	 * tail pointer
907 	 */
908 	membar_sync();
909 
910 	r->ckr_ktail = idx_end;
911 
912 	kr_exit(r);
913 
914 	r->ckr_na_notify(r, kernproc, NA_NOTEF_PUSH);
915 
916 	SK_DF(SK_VERB_FSW_DP | SK_VERB_RING, "%s enqueued %d pkts",
917 	    r->ckr_name, n_pkts);
918 }
919 
920 static void
921 pkts_to_pktq(struct __kern_packet *pkts[], uint32_t n_pkts, struct pktq *pktq)
922 {
923 	ASSERT(KPKTQ_EMPTY(pktq));
924 
925 	for (uint32_t i = 0; i < n_pkts; i++) {
926 		struct __kern_packet *pkt = pkts[i];
927 		ASSERT(pkt->pkt_nextpkt == NULL);
928 		KPKTQ_ENQUEUE(pktq, pkt);
929 	}
930 }
931 
932 /*
933  * This function is modeled after nx_netif_host_grab_pkts() in nx_netif_host.c.
934  */
935 SK_NO_INLINE_ATTRIBUTE
936 static void
937 convert_native_pktq_to_mbufs(struct nx_flowswitch *fsw, struct pktq *pktq,
938     struct mbuf **m_headp, struct mbuf **m_tailp, uint32_t *cnt, uint32_t *bytes)
939 {
940 	uint32_t tot_cnt;
941 	unsigned int num_segs = 1;
942 	struct mbuf *mhead, *head = NULL, *tail = NULL, **tailp = &head;
943 	uint32_t mhead_cnt, mhead_bufsize;
944 	uint32_t mhead_waste = 0;
945 	uint32_t mcnt = 0, mbytes = 0;
946 	uint32_t largest, max_pkt_len;
947 	struct __kern_packet *pkt;
948 	struct kern_pbufpool *pp;
949 
950 	tot_cnt = KPKTQ_LEN(pktq);
951 	ASSERT(tot_cnt > 0);
952 	mhead_cnt = tot_cnt;
953 
954 	/*
955 	 * Opportunistically batch-allocate the mbufs based on the largest
956 	 * packet size we've seen in the recent past.  Note that we reset
957 	 * fe_rx_largest_size below if we notice that we're under-utilizing the
958 	 * allocated buffers (thus disabling this batch allocation).
959 	 */
960 	largest = *(volatile uint32_t*)&fsw->fsw_rx_largest_size; /* read once */
961 	if (__probable(largest != 0)) {
962 		if (largest <= MCLBYTES) {
963 			mhead = m_allocpacket_internal(&mhead_cnt, MCLBYTES,
964 			    &num_segs, M_NOWAIT, 1, 0);
965 			mhead_bufsize = MCLBYTES;
966 		} else if (largest <= MBIGCLBYTES) {
967 			mhead = m_allocpacket_internal(&mhead_cnt, MBIGCLBYTES,
968 			    &num_segs, M_NOWAIT, 1, 0);
969 			mhead_bufsize = MBIGCLBYTES;
970 		} else if (largest <= M16KCLBYTES) {
971 			mhead = m_allocpacket_internal(&mhead_cnt, M16KCLBYTES,
972 			    &num_segs, M_NOWAIT, 1, 0);
973 			mhead_bufsize = M16KCLBYTES;
974 		} else if (largest <= M16KCLBYTES * 2) {
975 			num_segs = 2;
976 			mhead = m_allocpacket_internal(&mhead_cnt, M16KCLBYTES * 2,
977 			    &num_segs, M_NOWAIT, 1, 0);
978 			mhead_bufsize = M16KCLBYTES * 2;
979 		} else {
980 			mhead = NULL;
981 			mhead_bufsize = mhead_cnt = 0;
982 		}
983 	} else {
984 		mhead = NULL;
985 		mhead_bufsize = mhead_cnt = 0;
986 	}
987 	DTRACE_SKYWALK4(bufstats, uint32_t, largest, uint32_t, mhead_bufsize,
988 	    uint32_t, mhead_cnt, uint32_t, tot_cnt);
989 
990 	pp = __DECONST(struct kern_pbufpool *, KPKTQ_FIRST(pktq)->pkt_qum.qum_pp);
991 	max_pkt_len = PP_BUF_SIZE_DEF(pp) * pp->pp_max_frags;
992 
993 	KPKTQ_FOREACH(pkt, pktq) {
994 		uint32_t tot_len, len;
995 		uint16_t pad, llhlen, iphlen;
996 		boolean_t do_cksum_rx;
997 		struct mbuf *m;
998 		int error;
999 
1000 		llhlen = pkt->pkt_l2_len;
1001 		len = pkt->pkt_length;
1002 		if (__improbable(len > max_pkt_len || llhlen > len)) {
1003 			DTRACE_SKYWALK2(bad__len, struct nx_flowswitch *, fsw,
1004 			    struct __kern_packet *, pkt);
1005 			FSW_STATS_INC(FSW_STATS_DROP);
1006 			FSW_STATS_INC(FSW_STATS_RX_COPY_BAD_LEN);
1007 			continue;
1008 		}
1009 		/* begin payload on 32-bit boundary; figure out the padding */
1010 		pad = (uint16_t)P2ROUNDUP(llhlen, sizeof(uint32_t)) - llhlen;
1011 		tot_len = pad + len;
1012 
1013 		/* remember largest packet size */
1014 		if (__improbable(largest < tot_len)) {
1015 			largest = MAX(tot_len, MCLBYTES);
1016 		}
1017 
1018 		/*
1019 		 * If the above batch allocation returned partial
1020 		 * success, we try a blocking allocation here again.
1021 		 */
1022 		m = mhead;
1023 		if (__improbable(m == NULL || tot_len > mhead_bufsize)) {
1024 			ASSERT(mhead != NULL || mhead_cnt == 0);
1025 			num_segs = 1;
1026 			if (tot_len > M16KCLBYTES) {
1027 				num_segs = 0;
1028 			}
1029 			if ((error = mbuf_allocpacket(MBUF_DONTWAIT, tot_len,
1030 			    &num_segs, &m)) != 0) {
1031 				DTRACE_SKYWALK2(bad__len,
1032 				    struct nx_flowswitch *, fsw,
1033 				    struct __kern_packet *, pkt);
1034 				FSW_STATS_INC(FSW_STATS_DROP_NOMEM_MBUF);
1035 				FSW_STATS_INC(FSW_STATS_DROP);
1036 				continue;
1037 			}
1038 		} else {
1039 			mhead = m->m_nextpkt;
1040 			m->m_nextpkt = NULL;
1041 			ASSERT(mhead_cnt != 0);
1042 			--mhead_cnt;
1043 
1044 			/* check if we're underutilizing large buffers */
1045 			if (__improbable(mhead_bufsize > MCLBYTES &&
1046 			    tot_len < (mhead_bufsize >> 1))) {
1047 				++mhead_waste;
1048 			}
1049 			if (__improbable(mhead_bufsize >= tot_len + M16KCLBYTES)) {
1050 				FSW_STATS_INC(FSW_STATS_RX_WASTED_16KMBUF);
1051 			}
1052 		}
1053 		m->m_data += pad;
1054 		m->m_pkthdr.pkt_hdr = mtod(m, uint8_t *);
1055 
1056 		/* don't include IP header from partial sum */
1057 		if (__probable((pkt->pkt_qum_qflags &
1058 		    QUM_F_FLOW_CLASSIFIED) != 0)) {
1059 			iphlen = pkt->pkt_flow_ip_hlen;
1060 			do_cksum_rx = sk_cksum_rx;
1061 		} else {
1062 			iphlen = 0;
1063 			do_cksum_rx = FALSE;
1064 		}
1065 
1066 		fsw->fsw_pkt_copy_to_mbuf(NR_RX, SK_PKT2PH(pkt),
1067 		    pkt->pkt_headroom, m, 0, len, do_cksum_rx,
1068 		    llhlen + iphlen);
1069 
1070 		FSW_STATS_INC(FSW_STATS_RX_COPY_PKT2MBUF);
1071 		if (do_cksum_rx) {
1072 			FSW_STATS_INC(FSW_STATS_RX_COPY_SUM);
1073 		}
1074 #if DEBUG || DEVELOPMENT
1075 		if (__improbable(pkt_trailers > 0)) {
1076 			(void) pkt_add_trailers_mbuf(m, llhlen + iphlen);
1077 		}
1078 #endif /* DEBUG || DEVELOPMENT */
1079 		m_adj(m, llhlen);
1080 
1081 		m->m_pkthdr.rcvif = fsw->fsw_ifp;
1082 		if (__improbable((pkt->pkt_link_flags &
1083 		    PKT_LINKF_ETHFCS) != 0)) {
1084 			m->m_flags |= M_HASFCS;
1085 		}
1086 		if (__improbable(pkt->pkt_pflags & PKT_F_WAKE_PKT)) {
1087 			m->m_pkthdr.pkt_flags |= PKTF_WAKE_PKT;
1088 		}
1089 		ASSERT(m->m_nextpkt == NULL);
1090 		tail = m;
1091 		*tailp = m;
1092 		tailp = &m->m_nextpkt;
1093 		mcnt++;
1094 		mbytes += m_pktlen(m);
1095 	}
1096 	/* free any leftovers */
1097 	if (__improbable(mhead != NULL)) {
1098 		DTRACE_SKYWALK1(mhead__leftover, uint32_t, mhead_cnt);
1099 		ASSERT(mhead_cnt != 0);
1100 		(void) m_freem_list(mhead);
1101 		mhead = NULL;
1102 		mhead_cnt = 0;
1103 	}
1104 
1105 	/* reset if most packets (>50%) are smaller than our batch buffers */
1106 	if (__improbable(mhead_waste > ((uint32_t)tot_cnt >> 1))) {
1107 		DTRACE_SKYWALK4(mhead__waste, struct nx_flowswitch *, fsw,
1108 		    struct flow_entry *, NULL, uint32_t, mhead_waste,
1109 		    uint32_t, tot_cnt);
1110 		largest = 0;
1111 	}
1112 
1113 	if (largest != fsw->fsw_rx_largest_size) {
1114 		atomic_set_32(&fsw->fsw_rx_largest_size, largest);
1115 	}
1116 
1117 	pp_free_pktq(pktq);
1118 	*m_headp = head;
1119 	*m_tailp = tail;
1120 	*cnt = mcnt;
1121 	*bytes = mbytes;
1122 }
1123 
1124 /*
1125  * This function only extracts the mbuf from the packet. The caller frees
1126  * the packet.
1127  */
1128 static inline struct mbuf *
1129 convert_compat_pkt_to_mbuf(struct nx_flowswitch *fsw, struct __kern_packet *pkt)
1130 {
1131 	struct mbuf *m;
1132 	struct pkthdr *mhdr;
1133 	uint16_t llhlen;
1134 
1135 	m = pkt->pkt_mbuf;
1136 	ASSERT(m != NULL);
1137 
1138 	llhlen = pkt->pkt_l2_len;
1139 	if (llhlen > pkt->pkt_length) {
1140 		m_freem(m);
1141 		KPKT_CLEAR_MBUF_DATA(pkt);
1142 		DTRACE_SKYWALK2(bad__len, struct nx_flowswitch *, fsw,
1143 		    struct __kern_packet *, pkt);
1144 		FSW_STATS_INC(FSW_STATS_DROP);
1145 		FSW_STATS_INC(FSW_STATS_RX_COPY_BAD_LEN);
1146 		return NULL;
1147 	}
1148 	mhdr = &m->m_pkthdr;
1149 	if ((mhdr->csum_flags & CSUM_DATA_VALID) == 0 &&
1150 	    PACKET_HAS_PARTIAL_CHECKSUM(pkt)) {
1151 		mhdr->csum_flags &= ~CSUM_RX_FLAGS;
1152 		mhdr->csum_flags |= (CSUM_DATA_VALID | CSUM_PARTIAL);
1153 		mhdr->csum_rx_start = pkt->pkt_csum_rx_start_off;
1154 		mhdr->csum_rx_val = pkt->pkt_csum_rx_value;
1155 	}
1156 #if DEBUG || DEVELOPMENT
1157 	uint32_t extra = 0;
1158 	if (__improbable(pkt_trailers > 0)) {
1159 		extra = pkt_add_trailers_mbuf(m, llhlen);
1160 	}
1161 #endif /* DEBUG || DEVELOPMENT */
1162 	m_adj(m, llhlen);
1163 	ASSERT((uint32_t)m_pktlen(m) == ((pkt->pkt_length - llhlen) + extra));
1164 	KPKT_CLEAR_MBUF_DATA(pkt);
1165 	return m;
1166 }
1167 
1168 SK_NO_INLINE_ATTRIBUTE
1169 static void
1170 convert_compat_pktq_to_mbufs(struct nx_flowswitch *fsw, struct pktq *pktq,
1171     struct mbuf **m_head, struct mbuf **m_tail, uint32_t *cnt, uint32_t *bytes)
1172 {
1173 	struct __kern_packet *pkt;
1174 	struct mbuf *m, *head = NULL, *tail = NULL, **tailp = &head;
1175 	uint32_t c = 0, b = 0;
1176 
1177 	KPKTQ_FOREACH(pkt, pktq) {
1178 		m = convert_compat_pkt_to_mbuf(fsw, pkt);
1179 		if (__improbable(m == NULL)) {
1180 			continue;
1181 		}
1182 		tail = m;
1183 		*tailp = m;
1184 		tailp = &m->m_nextpkt;
1185 		c++;
1186 		b += m_pktlen(m);
1187 	}
1188 	pp_free_pktq(pktq);
1189 	*m_head = head;
1190 	*m_tail = tail;
1191 	*cnt = c;
1192 	*bytes = b;
1193 }
1194 
1195 void
1196 fsw_host_sendup(ifnet_t ifp, struct mbuf *m_head, struct mbuf *m_tail,
1197     uint32_t cnt, uint32_t bytes)
1198 {
1199 	struct ifnet_stat_increment_param s;
1200 
1201 	bzero(&s, sizeof(s));
1202 	s.packets_in = cnt;
1203 	s.bytes_in = bytes;
1204 	dlil_input_handler(ifp, m_head, m_tail, &s, FALSE, NULL);
1205 }
1206 
1207 void
1208 fsw_host_rx(struct nx_flowswitch *fsw, struct pktq *pktq)
1209 {
1210 	struct mbuf *m_head = NULL, *m_tail = NULL;
1211 	uint32_t cnt = 0, bytes = 0;
1212 	boolean_t compat;
1213 
1214 	ASSERT(!KPKTQ_EMPTY(pktq));
1215 
1216 	/* All packets in the pktq must have the same type */
1217 	compat = ((KPKTQ_FIRST(pktq)->pkt_pflags & PKT_F_MBUF_DATA) != 0);
1218 	if (compat) {
1219 		convert_compat_pktq_to_mbufs(fsw, pktq, &m_head, &m_tail, &cnt,
1220 		    &bytes);
1221 	} else {
1222 		convert_native_pktq_to_mbufs(fsw, pktq, &m_head, &m_tail, &cnt,
1223 		    &bytes);
1224 	}
1225 	if (__improbable(m_head == NULL)) {
1226 		DTRACE_SKYWALK1(empty__head, struct nx_flowswitch *, fsw);
1227 		return;
1228 	}
1229 	fsw_host_sendup(fsw->fsw_ifp, m_head, m_tail, cnt, bytes);
1230 }
1231 
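/*
 * Enqueue as many packets as fit onto the destination Rx ring; whatever
 * remains in pktq afterwards did not fit, is accounted as
 * FSW_STATS_RX_DST_RING_FULL and then dropped (tail drop).
 */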
1232 void
1233 fsw_ring_enqueue_tail_drop(struct nx_flowswitch *fsw,
1234     struct __kern_channel_ring *r, struct pktq *pktq)
1235 {
1236 	fsw_ring_enqueue_pktq(fsw, r, pktq);
1237 	FSW_STATS_ADD(FSW_STATS_RX_DST_RING_FULL, KPKTQ_LEN(pktq));
1238 	dp_drop_pktq(fsw, pktq);
1239 }
1240 
1241 static struct nexus_adapter *
1242 flow_get_na(struct nx_flowswitch *fsw, struct flow_entry *fe)
1243 {
1244 	struct kern_nexus *nx = fsw->fsw_nx;
1245 	struct nexus_adapter *na = NULL;
1246 	nexus_port_t port = fe->fe_nx_port;
1247 
1248 	if (port == FSW_VP_DEV || port == FSW_VP_HOST) {
1249 		SK_ERR("dev or host ports have no NA");
1250 		return NULL;
1251 	}
1252 
1253 	if (__improbable(!nx_port_is_valid(nx, port))) {
1254 		SK_DF(SK_VERB_FSW_DP, "%s[%d] port no longer valid",
1255 		    if_name(fsw->fsw_ifp), port);
1256 		return NULL;
1257 	}
1258 
1259 	na = nx_port_get_na(nx, port);
1260 	if (__improbable(na == NULL)) {
1261 		FSW_STATS_INC(FSW_STATS_DST_NXPORT_INVALID);
1262 		SK_DF(SK_VERB_FSW_DP, "%s[%d] NA no longer valid",
1263 		    if_name(fsw->fsw_ifp), port);
1264 		return NULL;
1265 	}
1266 
1267 	if (__improbable(!NA_IS_ACTIVE(na))) {
1268 		FSW_STATS_INC(FSW_STATS_DST_NXPORT_INACTIVE);
1269 		SK_DF(SK_VERB_FSW_DP, "%s[%d] NA no longer active",
1270 		    if_name(fsw->fsw_ifp), port);
1271 		return NULL;
1272 	}
1273 
1274 	if (__improbable(nx_port_is_defunct(nx, port))) {
1275 		FSW_STATS_INC(FSW_STATS_DST_NXPORT_DEFUNCT);
1276 		SK_DF(SK_VERB_FSW_DP, "%s[%d] NA defuncted",
1277 		    if_name(fsw->fsw_ifp), port);
1278 		return NULL;
1279 	}
1280 
1281 	return na;
1282 }
1283 
1284 static inline struct __kern_channel_ring *
1285 flow_get_ring(struct nx_flowswitch *fsw, struct flow_entry *fe, enum txrx txrx)
1286 {
1287 	struct nexus_vp_adapter *na = NULL;
1288 	struct __kern_channel_ring *r = NULL;
1289 
1290 	na = VPNA(flow_get_na(fsw, fe));
1291 	if (__improbable(na == NULL)) {
1292 		return NULL;
1293 	}
1294 
1295 	switch (txrx) {
1296 	case NR_RX:
1297 		r = &na->vpna_up.na_rx_rings[0];
1298 		break;
1299 	case NR_TX:
1300 		r = &na->vpna_up.na_tx_rings[0];
1301 		break;
1302 	default:
1303 		__builtin_unreachable();
1304 		VERIFY(0);
1305 	}
1306 
1307 	if (__improbable(KR_DROP(r))) {
1308 		FSW_STATS_INC(FSW_STATS_DST_RING_DROPMODE);
1309 		SK_DF(SK_VERB_FSW_DP | SK_VERB_RING, "r %s 0x%llx drop mode",
1310 		    r->ckr_name, SK_KVA(r));
1311 		return NULL;
1312 	}
1313 
1314 	ASSERT(KRNA(r)->na_md_type == NEXUS_META_TYPE_PACKET);
1315 
1316 #if (DEVELOPMENT || DEBUG)
1317 	if (r != NULL) {
1318 		_FSW_INJECT_ERROR(4, r, NULL, null_func);
1319 	}
1320 #endif /* DEVELOPMENT || DEBUG */
1321 
1322 	return r;
1323 }
1324 
1325 struct __kern_channel_ring *
1326 fsw_flow_get_rx_ring(struct nx_flowswitch *fsw, struct flow_entry *fe)
1327 {
1328 	return flow_get_ring(fsw, fe, NR_RX);
1329 }
1330 
1331 static inline struct __kern_channel_ring *
1332 fsw_flow_get_tx_ring(struct nx_flowswitch *fsw, struct flow_entry *fe)
1333 {
1334 	return flow_get_ring(fsw, fe, NR_TX);
1335 }
1336 
1337 static bool
1338 dp_flow_route_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
1339 {
1340 	struct flow_route *fr = fe->fe_route;
1341 	struct ifnet *ifp = fsw->fsw_ifp;
1342 
1343 	if (__improbable(!(fe->fe_flags & FLOWENTF_NONVIABLE) &&
1344 	    !fe->fe_want_nonviable && (fe->fe_key.fk_mask & FKMASK_SRC) &&
1345 	    fe->fe_laddr_gencnt != ifp->if_nx_flowswitch.if_fsw_ipaddr_gencnt &&
1346 	    !flow_route_key_validate(&fe->fe_key, ifp, &fe->fe_laddr_gencnt))) {
1347 		/*
1348 		 * The source address is no longer around; we want this
1349 		 * flow to be nonviable, but that requires holding the lock
1350 		 * as writer (which isn't the case now.)  Indicate that
1351 		 * we need to finalize the nonviable later down below.
1352 		 *
1353 		 * We also request that the flow route be re-configured,
1354 		 * if this is a connected mode flow.
1355 		 *
1356 		 */
1357 		if (!(fe->fe_flags & FLOWENTF_NONVIABLE)) {
1358 			/*
1359 			 * fsw_pending_nonviable is a hint for reaper thread;
1360 			 * due to the fact that setting fe_want_nonviable and
1361 			 * incrementing fsw_pending_nonviable counter is not
1362 			 * atomic, let the increment happen first, and the
1363 			 * thread losing the CAS does decrement.
1364 			 */
1365 			atomic_add_32(&fsw->fsw_pending_nonviable, 1);
1366 			if (atomic_test_set_32(&fe->fe_want_nonviable, 0, 1)) {
1367 				fsw_reap_sched(fsw);
1368 			} else {
1369 				atomic_add_32(&fsw->fsw_pending_nonviable, -1);
1370 			}
1371 		}
1372 		if (fr != NULL) {
1373 			atomic_add_32(&fr->fr_want_configure, 1);
1374 		}
1375 	}
1376 
1377 	/* if flow was (or is going to be) marked as nonviable, drop it */
1378 	if (__improbable(fe->fe_want_nonviable ||
1379 	    (fe->fe_flags & FLOWENTF_NONVIABLE) != 0)) {
1380 		SK_DF(SK_VERB_FSW_DP | SK_VERB_FLOW, "flow 0x%llx non-viable",
1381 		    SK_KVA(fe));
1382 		return false;
1383 	}
1384 	return true;
1385 }
1386 
1387 bool
1388 dp_flow_rx_route_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
1389 {
1390 	bool okay;
1391 	okay = dp_flow_route_process(fsw, fe);
1392 #if (DEVELOPMENT || DEBUG)
1393 	if (okay) {
1394 		_FSW_INJECT_ERROR(5, okay, false, null_func);
1395 	}
1396 #endif /* DEVELOPMENT || DEBUG */
1397 
1398 	return okay;
1399 }
1400 
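/*
 * Per-flow Rx processing for user flows: validate the route, divert to the
 * host stack for FSW_VP_HOST flows, then for each batched packet run flow
 * tracking and, when the source pool differs from the destination ring's
 * pool, copy it into a freshly allocated packet (plus any extra buflets)
 * before enqueueing onto that ring with tail-drop semantics.
 */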
1401 void
1402 dp_flow_rx_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
1403 {
1404 	struct pktq dpkts;              /* dst pool alloc'ed packets */
1405 	struct pktq disposed_pkts;         /* done src packets */
1406 	struct pktq dropped_pkts;         /* dropped src packets */
1407 	struct pktq transferred_pkts;         /* dst packet ready for ring */
1408 	struct __kern_packet *pkt, *tpkt;
1409 	struct kern_pbufpool *dpp;
1410 	uint32_t n_pkts = KPKTQ_LEN(&fe->fe_rx_pktq);
1411 	uint64_t buf_array[RX_BUFLET_BATCH_COUNT];
1412 	uint16_t buf_array_iter = 0;
1413 	uint32_t cnt, buf_cnt = 0;
1414 	int err;
1415 
1416 	KPKTQ_INIT(&dpkts);
1417 	KPKTQ_INIT(&dropped_pkts);
1418 	KPKTQ_INIT(&disposed_pkts);
1419 	KPKTQ_INIT(&transferred_pkts);
1420 
1421 	if (__improbable(!dp_flow_rx_route_process(fsw, fe))) {
1422 		SK_ERR("Rx route bad");
1423 		fsw_snoop_and_dequeue(fe, &dropped_pkts, true);
1424 		FSW_STATS_ADD(FSW_STATS_RX_FLOW_NONVIABLE, n_pkts);
1425 		goto done;
1426 	}
1427 
1428 	if (fe->fe_nx_port == FSW_VP_HOST) {
1429 		/*
1430 		 * The host ring does not exist anymore so we can't take
1431 		 * the enqueue path below. This path should only be hit
1432 		 * for the rare tcp fragmentation case.
1433 		 */
1434 		fsw_host_rx(fsw, &fe->fe_rx_pktq);
1435 		return;
1436 	}
1437 
1438 	/* find the ring */
1439 	struct __kern_channel_ring *r;
1440 	r = fsw_flow_get_rx_ring(fsw, fe);
1441 	if (__improbable(r == NULL)) {
1442 		fsw_snoop_and_dequeue(fe, &dropped_pkts, true);
1443 		goto done;
1444 	}
1445 
1446 	/* snoop before L2 is stripped */
1447 	if (__improbable(pktap_total_tap_count != 0)) {
1448 		fsw_snoop(fsw, fe, true);
1449 	}
1450 
1451 	dpp = r->ckr_pp;
1452 	/* batch allocate enough packets */
1453 	err = pp_alloc_pktq(dpp, 1, &dpkts, n_pkts, NULL, NULL,
1454 	    SKMEM_NOSLEEP);
1455 	if (__improbable(err == ENOMEM)) {
1456 		ASSERT(KPKTQ_EMPTY(&dpkts));
1457 		KPKTQ_CONCAT(&dropped_pkts, &fe->fe_rx_pktq);
1458 		FSW_STATS_ADD(FSW_STATS_DROP_NOMEM_PKT, n_pkts);
1459 		SK_ERR("failed to alloc %u pkts for kr %s, 0x%llx", n_pkts,
1460 		    r->ckr_name, SK_KVA(r));
1461 		goto done;
1462 	}
1463 
1464 	/*
1465 	 * estimate total number of buflets for the packet chain.
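	 * Illustrative example: with 32KB of pending flow bytes and a
	 * 2048-byte default buflet size, howmany() yields 16; since each
	 * packet allocated above already carries one buflet, only the
	 * excess over n_pkts needs to be allocated here.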
1466 	 */
1467 	cnt = howmany(fe->fe_rx_pktq_bytes, PP_BUF_SIZE_DEF(dpp));
1468 	if (cnt > n_pkts) {
1469 		ASSERT(dpp->pp_max_frags > 1);
1470 		cnt -= n_pkts;
1471 		buf_cnt = MIN(RX_BUFLET_BATCH_COUNT, cnt);
1472 		err = pp_alloc_buflet_batch(dpp, buf_array, &buf_cnt,
1473 		    SKMEM_NOSLEEP, PP_ALLOC_BFT_ATTACH_BUFFER);
1474 		if (__improbable(buf_cnt == 0)) {
1475 			KPKTQ_CONCAT(&dropped_pkts, &fe->fe_rx_pktq);
1476 			FSW_STATS_ADD(FSW_STATS_DROP_NOMEM_PKT, n_pkts);
1477 			SK_ERR("failed to alloc %d buflets (err %d) for kr %s, "
1478 			    "0x%llx", cnt, err, r->ckr_name, SK_KVA(r));
1479 			goto done;
1480 		}
1481 		err = 0;
1482 	}
1483 
1484 	/* extra processing for user flow */
1485 	KPKTQ_FOREACH_SAFE(pkt, &fe->fe_rx_pktq, tpkt) {
1486 		err = 0;
1487 		KPKTQ_REMOVE(&fe->fe_rx_pktq, pkt);
1488 		if (fe->fe_rx_pktq_bytes > pkt->pkt_flow_ulen) {
1489 			fe->fe_rx_pktq_bytes -= pkt->pkt_flow_ulen;
1490 		} else {
1491 			fe->fe_rx_pktq_bytes = 0;
1492 		}
1493 		err = flow_pkt_track(fe, pkt, true);
1494 		_FSW_INJECT_ERROR(33, err, EPROTO, null_func);
1495 		if (__improbable(err != 0)) {
1496 			SK_ERR("flow_pkt_track failed (err %d)", err);
1497 			FSW_STATS_INC(FSW_STATS_RX_FLOW_TRACK_ERR);
1498 			/* if need to trigger RST */
1499 			if (err == ENETRESET) {
1500 				flow_track_abort_tcp(fe, pkt, NULL);
1501 			}
1502 			KPKTQ_ENQUEUE(&dropped_pkts, pkt);
1503 			continue;
1504 		}
1505 
1506 		/* transfer to dpkt */
1507 		if (pkt->pkt_qum.qum_pp != dpp) {
1508 			struct __kern_buflet *bprev, *bnew;
1509 			struct __kern_packet *dpkt = NULL;
1510 			uint32_t n_bufs, i;
1511 
1512 			KPKTQ_DEQUEUE(&dpkts, dpkt);
1513 			if (__improbable(dpkt == NULL)) {
1514 				FSW_STATS_INC(FSW_STATS_DROP_NOMEM_PKT);
1515 				KPKTQ_ENQUEUE(&dropped_pkts, pkt);
1516 				continue;
1517 			}
1518 			n_bufs = howmany(pkt->pkt_length, PP_BUF_SIZE_DEF(dpp));
1519 			n_bufs--;
1520 			for (i = 0; i < n_bufs; i++) {
1521 				if (__improbable(buf_cnt == 0)) {
1522 					ASSERT(dpp->pp_max_frags > 1);
1523 					buf_array_iter = 0;
1524 					cnt = howmany(fe->fe_rx_pktq_bytes,
1525 					    PP_BUF_SIZE_DEF(dpp));
1526 					n_pkts = KPKTQ_LEN(&fe->fe_rx_pktq);
1527 					if (cnt >= n_pkts) {
1528 						cnt -= n_pkts;
1529 					} else {
1530 						cnt = 0;
1531 					}
1532 					cnt += (n_bufs - i);
1533 					buf_cnt = MIN(RX_BUFLET_BATCH_COUNT,
1534 					    cnt);
1535 					cnt = buf_cnt;
1536 					err = pp_alloc_buflet_batch(dpp,
1537 					    buf_array, &buf_cnt,
1538 					    SKMEM_NOSLEEP, PP_ALLOC_BFT_ATTACH_BUFFER);
1539 					if (__improbable(buf_cnt == 0)) {
1540 						FSW_STATS_INC(FSW_STATS_DROP_NOMEM_PKT);
1541 						KPKTQ_ENQUEUE(&dropped_pkts,
1542 						    pkt);
1543 						pkt = NULL;
1544 						pp_free_packet_single(dpkt);
1545 						dpkt = NULL;
1546 						SK_ERR("failed to alloc %d "
1547 						    "buflets (err %d) for "
1548 						    "kr %s, 0x%llx", cnt, err,
1549 						    r->ckr_name, SK_KVA(r));
1550 						break;
1551 					}
1552 					err = 0;
1553 				}
1554 				ASSERT(buf_cnt != 0);
1555 				if (i == 0) {
1556 					PKT_GET_FIRST_BUFLET(dpkt, 1, bprev);
1557 				}
1558 				bnew = (kern_buflet_t)buf_array[buf_array_iter];
1559 				buf_array[buf_array_iter] = 0;
1560 				buf_array_iter++;
1561 				buf_cnt--;
1562 				VERIFY(kern_packet_add_buflet(SK_PKT2PH(dpkt),
1563 				    bprev, bnew) == 0);
1564 				bprev = bnew;
1565 			}
1566 			if (__improbable(err != 0)) {
1567 				continue;
1568 			}
1569 			err = copy_packet_from_dev(fsw, pkt, dpkt);
1570 			_FSW_INJECT_ERROR(43, err, EINVAL, null_func);
1571 			if (__improbable(err != 0)) {
1572 				SK_ERR("copy packet failed (err %d)", err);
1573 				KPKTQ_ENQUEUE(&dropped_pkts, pkt);
1574 				pp_free_packet_single(dpkt);
1575 				dpkt = NULL;
1576 				continue;
1577 			}
1578 			KPKTQ_ENQUEUE(&disposed_pkts, pkt);
1579 			pkt = dpkt;
1580 		}
1581 		_UUID_COPY(pkt->pkt_flow_id, fe->fe_uuid);
1582 		_UUID_COPY(pkt->pkt_policy_euuid, fe->fe_eproc_uuid);
1583 		pkt->pkt_policy_id = fe->fe_policy_id;
1584 		pkt->pkt_transport_protocol = fe->fe_transport_protocol;
1585 		if (pkt->pkt_bufs_cnt > 1) {
1586 			pkt->pkt_aggr_type = PKT_AGGR_SINGLE_IP;
1587 			pkt->pkt_seg_cnt = 1;
1588 		}
1589 		KPKTQ_ENQUEUE(&transferred_pkts, pkt);
1590 	}
1591 	KPKTQ_FINI(&fe->fe_rx_pktq);
1592 	KPKTQ_CONCAT(&fe->fe_rx_pktq, &transferred_pkts);
1593 	KPKTQ_FINI(&transferred_pkts);
1594 
1595 	fsw_ring_enqueue_tail_drop(fsw, r, &fe->fe_rx_pktq);
1596 
1597 done:
1598 	/* Free unused buflets */
1599 	while (buf_cnt > 0) {
1600 		pp_free_buflet(dpp, (kern_buflet_t)(buf_array[buf_array_iter]));
1601 		buf_array[buf_array_iter] = 0;
1602 		buf_array_iter++;
1603 		buf_cnt--;
1604 	}
1605 	dp_free_pktq(fsw, &dpkts);
1606 	dp_free_pktq(fsw, &disposed_pkts);
1607 	dp_drop_pktq(fsw, &dropped_pkts);
1608 }
1609 
1610 static inline void
1611 rx_flow_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
1612 {
1613 	ASSERT(!KPKTQ_EMPTY(&fe->fe_rx_pktq));
1614 	ASSERT(KPKTQ_LEN(&fe->fe_rx_pktq) != 0);
1615 
1616 	SK_DF(SK_VERB_FSW_DP | SK_VERB_RX, "Rx %d pkts for fe %p port %d",
1617 	    KPKTQ_LEN(&fe->fe_rx_pktq), fe, fe->fe_nx_port);
1618 
1619 	/* flow related processing (default, agg, fpd, etc.) */
1620 	fe->fe_rx_process(fsw, fe);
1621 
1622 	if (__improbable(fe->fe_want_withdraw)) {
1623 		fsw_reap_sched(fsw);
1624 	}
1625 
1626 	KPKTQ_FINI(&fe->fe_rx_pktq);
1627 }
1628 
1629 static inline void
1630 dp_rx_process_wake_packet(struct nx_flowswitch *fsw, struct __kern_packet *pkt)
1631 {
1632 	/*
1633 	 * We only care about wake packets of flows that belong to the flowswitch,
1634 	 * as wake packets for the host stack are handled by the host input
1635 	 * function.
1636 	 */
1637 #if (DEBUG || DEVELOPMENT)
1638 	if (__improbable(fsw->fsw_ifp->if_xflags & IFXF_MARK_WAKE_PKT)) {
1639 		/*
1640 		 * This is a one shot command
1641 		 */
1642 		fsw->fsw_ifp->if_xflags &= ~IFXF_MARK_WAKE_PKT;
1643 
1644 		pkt->pkt_pflags |= PKT_F_WAKE_PKT;
1645 	}
1646 #endif /* (DEBUG || DEVELOPMENT) */
1647 	if (__improbable(pkt->pkt_pflags & PKT_F_WAKE_PKT)) {
1648 		if_ports_used_match_pkt(fsw->fsw_ifp, pkt);
1649 	}
1650 }
1651 
1652 static void
1653 _fsw_receive_locked(struct nx_flowswitch *fsw, struct pktq *pktq)
1654 {
1655 	struct __kern_packet *pkt, *tpkt;
1656 	struct flow_entry_list fes = TAILQ_HEAD_INITIALIZER(fes);
1657 	struct flow_entry *fe, *prev_fe;
1658 	sa_family_t af;
1659 	struct pktq host_pkts, dropped_pkts;
1660 	int err;
1661 
1662 	KPKTQ_INIT(&host_pkts);
1663 	KPKTQ_INIT(&dropped_pkts);
1664 
1665 	if (__improbable(FSW_QUIESCED(fsw))) {
1666 		DTRACE_SKYWALK1(rx__quiesced, struct nx_flowswitch *, fsw);
1667 		KPKTQ_CONCAT(&dropped_pkts, pktq);
1668 		goto done;
1669 	}
1670 	if (__improbable(fsw->fsw_demux == NULL)) {
1671 		KPKTQ_CONCAT(&dropped_pkts, pktq);
1672 		goto done;
1673 	}
1674 
1675 	prev_fe = NULL;
1676 	KPKTQ_FOREACH_SAFE(pkt, pktq, tpkt) {
1677 		if (__probable(tpkt)) {
1678 			void *baddr;
1679 			MD_BUFLET_ADDR_ABS_PKT(tpkt, baddr);
1680 			SK_PREFETCH(baddr, 0);
1681 			/* prefetch L3 and L4 flow structs */
1682 			SK_PREFETCHW(tpkt->pkt_flow, 0);
1683 			SK_PREFETCHW(tpkt->pkt_flow, 128);
1684 		}
1685 
1686 		KPKTQ_REMOVE(pktq, pkt);
1687 
1688 		pkt = rx_prepare_packet(fsw, pkt);
1689 
1690 		af = fsw->fsw_demux(fsw, pkt);
1691 		if (__improbable(af == AF_UNSPEC)) {
1692 			KPKTQ_ENQUEUE(&host_pkts, pkt);
1693 			continue;
1694 		}
1695 
1696 		err = flow_pkt_classify(pkt, fsw->fsw_ifp, af, TRUE);
1697 		_FSW_INJECT_ERROR(1, err, ENXIO, null_func);
1698 		if (__improbable(err != 0)) {
1699 			FSW_STATS_INC(FSW_STATS_RX_FLOW_EXTRACT_ERR);
1700 			KPKTQ_ENQUEUE(&host_pkts, pkt);
1701 			continue;
1702 		}
1703 
1704 		if (__improbable(pkt->pkt_flow_ip_is_frag)) {
1705 			pkt = rx_process_ip_frag(fsw, pkt);
1706 			if (pkt == NULL) {
1707 				continue;
1708 			}
1709 		}
1710 
1711 #if DEVELOPMENT || DEBUG
1712 		trace_pkt_dump_payload(fsw->fsw_ifp, pkt, true);
1713 #endif /* DEVELOPMENT || DEBUG */
1714 
1715 		prev_fe = fe = rx_lookup_flow(fsw, pkt, prev_fe);
1716 		if (__improbable(fe == NULL)) {
1717 			KPKTQ_ENQUEUE_LIST(&host_pkts, pkt);
1718 			continue;
1719 		}
1720 
1721 		fe->fe_rx_pktq_bytes += pkt->pkt_flow_ulen;
1722 
1723 		dp_rx_process_wake_packet(fsw, pkt);
1724 
1725 		rx_flow_batch_packet(&fes, fe, pkt);
1726 		prev_fe = fe;
1727 	}
1728 
1729 	struct flow_entry *tfe = NULL;
1730 	TAILQ_FOREACH_SAFE(fe, &fes, fe_rx_link, tfe) {
1731 		rx_flow_process(fsw, fe);
1732 		TAILQ_REMOVE(&fes, fe, fe_rx_link);
1733 		fe->fe_rx_pktq_bytes = 0;
1734 		fe->fe_rx_frag_count = 0;
1735 		flow_entry_release(&fe);
1736 	}
1737 
1738 	if (!KPKTQ_EMPTY(&host_pkts)) {
1739 		fsw_host_rx(fsw, &host_pkts);
1740 	}
1741 
1742 done:
1743 	dp_drop_pktq(fsw, &dropped_pkts);
1744 }
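
/*
 * The Rx path above processes each packet as: demux -> classify -> (optional
 * IP fragment handling) -> flow lookup -> batch onto the flow entry, with
 * anything that fails demux or classification punted to the host stack.
 * prev_fe is passed back into the lookup so that back-to-back packets of the
 * same flow can be matched cheaply, acting as a one-entry lookup cache:
 *
 *	fe = rx_lookup_flow(fsw, pkt, prev_fe);	// previous match offered as a hint
 *	...
 *	prev_fe = fe;				// remember for the next packet
 */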
1745 
1746 #if (DEVELOPMENT || DEBUG)
1747 static void
1748 fsw_rps_rx(struct nx_flowswitch *fsw, uint32_t id,
1749     struct __kern_packet *pkt)
1750 {
1751 	struct fsw_rps_thread *frt = &fsw->fsw_rps_threads[id];
1752 
1753 	lck_mtx_lock_spin(&frt->frt_lock);
1754 	KPKTQ_ENQUEUE(&frt->frt_pktq, pkt);
1755 	lck_mtx_unlock(&frt->frt_lock);
1756 }
1757 
1758 static void
1759 fsw_rps_thread_schedule(struct nx_flowswitch *fsw, uint32_t id)
1760 {
1761 	struct fsw_rps_thread *frt = &fsw->fsw_rps_threads[id];
1762 
1763 	ASSERT(frt->frt_thread != THREAD_NULL);
1764 	lck_mtx_lock_spin(&frt->frt_lock);
1765 	ASSERT(!(frt->frt_flags & (FRT_TERMINATING | FRT_TERMINATED)));
1766 
1767 	frt->frt_requests++;
1768 	if (!(frt->frt_flags & FRT_RUNNING)) {
1769 		thread_wakeup((caddr_t)frt);
1770 	}
1771 	lck_mtx_unlock(&frt->frt_lock);
1772 }
1773 
1774 __attribute__((noreturn))
1775 static void
1776 fsw_rps_thread_cont(void *v, wait_result_t w)
1777 {
1778 	struct fsw_rps_thread *frt = v;
1779 	struct nx_flowswitch *fsw = frt->frt_fsw;
1780 
1781 	lck_mtx_lock(&frt->frt_lock);
1782 	if (__improbable(w == THREAD_INTERRUPTIBLE ||
1783 	    (frt->frt_flags & FRT_TERMINATING) != 0)) {
1784 		goto terminate;
1785 	}
1786 	if (KPKTQ_EMPTY(&frt->frt_pktq)) {
1787 		goto done;
1788 	}
1789 	frt->frt_flags |= FRT_RUNNING;
1790 
1791 	for (;;) {
1792 		uint32_t requests = frt->frt_requests;
1793 		struct pktq pkts;
1794 
1795 		KPKTQ_INIT(&pkts);
1796 		KPKTQ_CONCAT(&pkts, &frt->frt_pktq);
1797 		lck_mtx_unlock(&frt->frt_lock);
1798 
1799 		sk_protect_t protect;
1800 		protect = sk_sync_protect();
1801 		FSW_RLOCK(fsw);
1802 		_fsw_receive_locked(fsw, &pkts);
1803 		FSW_RUNLOCK(fsw);
1804 		sk_sync_unprotect(protect);
1805 
1806 		lck_mtx_lock(&frt->frt_lock);
1807 		if ((frt->frt_flags & FRT_TERMINATING) != 0 ||
1808 		    requests == frt->frt_requests) {
1809 			frt->frt_requests = 0;
1810 			break;
1811 		}
1812 	}
1813 
1814 done:
1815 	lck_mtx_unlock(&frt->frt_lock);
1816 	if (!(frt->frt_flags & FRT_TERMINATING)) {
1817 		frt->frt_flags &= ~FRT_RUNNING;
1818 		assert_wait(frt, THREAD_UNINT);
1819 		thread_block_parameter(fsw_rps_thread_cont, frt);
1820 		__builtin_unreachable();
1821 	} else {
1822 terminate:
1823 		LCK_MTX_ASSERT(&frt->frt_lock, LCK_MTX_ASSERT_OWNED);
1824 		frt->frt_flags &= ~(FRT_RUNNING | FRT_TERMINATING);
1825 		frt->frt_flags |= FRT_TERMINATED;
1826 
1827 		if (frt->frt_flags & FRT_TERMINATEBLOCK) {
1828 			thread_wakeup((caddr_t)&frt);
1829 		}
1830 		lck_mtx_unlock(&frt->frt_lock);
1831 
1832 		SK_D("fsw_rx_%s_%d terminated", if_name(fsw->fsw_ifp),
1833 		    frt->frt_idx);
1834 
1835 		/* for the extra refcnt from kernel_thread_start() */
1836 		thread_deallocate(current_thread());
1837 		/* this is the end */
1838 		thread_terminate(current_thread());
1839 		/* NOTREACHED */
1840 		__builtin_unreachable();
1841 	}
1842 
1843 	/* must never get here */
1844 	VERIFY(0);
1845 	/* NOTREACHED */
1846 	__builtin_unreachable();
1847 }
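
/*
 * The drain loop in fsw_rps_thread_cont() above relies on a request
 * generation counter to avoid missed wakeups: frt_requests is sampled under
 * frt_lock, the queued packets are processed with the lock dropped, and the
 * thread only parks once the counter is unchanged, i.e. no
 * fsw_rps_thread_schedule() call raced with the drain.  A minimal,
 * self-contained sketch of the same idiom (hypothetical names, assuming a
 * mutex-protected work queue):
 *
 *	lock(&q->mtx);
 *	for (;;) {
 *		uint32_t gen = q->requests;	// sample the current generation
 *		steal_all(q, &local);		// take everything queued so far
 *		unlock(&q->mtx);
 *		process(&local);		// expensive part, lock dropped
 *		lock(&q->mtx);
 *		if (gen == q->requests) {	// nothing new arrived; safe to park
 *			q->requests = 0;
 *			break;
 *		}
 *	}
 *	unlock(&q->mtx);
 */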
1848 
1849 __attribute__((noreturn))
1850 static void
1851 fsw_rps_thread_func(void *v, wait_result_t w)
1852 {
1853 #pragma unused(w)
1854 	struct fsw_rps_thread *frt = v;
1855 	struct nx_flowswitch *fsw = frt->frt_fsw;
1856 
1857 	char thread_name[MAXTHREADNAMESIZE];
1858 	bzero(thread_name, sizeof(thread_name));
1859 	(void) snprintf(thread_name, sizeof(thread_name), "fsw_rx_%s_%d",
1860 	    if_name(fsw->fsw_ifp), frt->frt_idx);
1861 	thread_set_thread_name(frt->frt_thread, thread_name);
1862 	SK_D("%s spawned", thread_name);
1863 
1864 	net_thread_marks_push(NET_THREAD_SYNC_RX);
1865 	assert_wait(frt, THREAD_UNINT);
1866 	(void) thread_block_parameter(fsw_rps_thread_cont, frt);
1867 
1868 	__builtin_unreachable();
1869 }
1870 
1871 static void
1872 fsw_rps_thread_join(struct nx_flowswitch *fsw, uint32_t i)
1873 {
1874 	struct fsw_rps_thread *frt = &fsw->fsw_rps_threads[i];
1875 	uint64_t f = (1 * NSEC_PER_MSEC);
1876 	uint64_t s = (1 * NSEC_PER_SEC);
1877 	uint32_t c = 0;
1878 
1879 	lck_mtx_lock(&frt->frt_lock);
1880 	frt->frt_flags |= FRT_TERMINATING;
1881 
1882 	while (!(frt->frt_flags & FRT_TERMINATED)) {
1883 		uint64_t t = 0;
1884 		nanoseconds_to_absolutetime((c++ == 0) ? f : s, &t);
1885 		clock_absolutetime_interval_to_deadline(t, &t);
1886 		ASSERT(t != 0);
1887 
1888 		frt->frt_flags |= FRT_TERMINATEBLOCK;
1889 		if (!(frt->frt_flags & FRT_RUNNING)) {
1890 			thread_wakeup_one((caddr_t)frt);
1891 		}
1892 		(void) assert_wait_deadline(&frt->frt_thread, THREAD_UNINT, t);
1893 		lck_mtx_unlock(&frt->frt_lock);
1894 		thread_block(THREAD_CONTINUE_NULL);
1895 		lck_mtx_lock(&frt->frt_lock);
1896 		frt->frt_flags &= ~FRT_TERMINATEBLOCK;
1897 	}
1898 	ASSERT(frt->frt_flags & FRT_TERMINATED);
1899 	lck_mtx_unlock(&frt->frt_lock);
1900 	frt->frt_thread = THREAD_NULL;
1901 }
1902 
1903 static void
1904 fsw_rps_thread_spawn(struct nx_flowswitch *fsw, uint32_t i)
1905 {
1906 	kern_return_t error;
1907 	struct fsw_rps_thread *frt = &fsw->fsw_rps_threads[i];
1908 	lck_mtx_init(&frt->frt_lock, &nexus_lock_group, &nexus_lock_attr);
1909 	frt->frt_idx = i;
1910 	frt->frt_fsw = fsw;
1911 	error = kernel_thread_start(fsw_rps_thread_func, frt, &frt->frt_thread);
1912 	ASSERT(!error);
1913 	KPKTQ_INIT(&frt->frt_pktq);
1914 }
1915 
1916 int
1917 fsw_rps_set_nthreads(struct nx_flowswitch *fsw, uint32_t n)
1918 {
1919 	if (n > FSW_RPS_MAX_NTHREADS) {
1920 		SK_ERR("rps nthreads %d, max %d", n, FSW_RPS_MAX_NTHREADS);
1921 		return EINVAL;
1922 	}
1923 
1924 	FSW_WLOCK(fsw);
1925 	if (n < fsw->fsw_rps_nthreads) {
1926 		for (uint32_t i = n; i < fsw->fsw_rps_nthreads; i++) {
1927 			fsw_rps_thread_join(fsw, i);
1928 		}
1929 		fsw->fsw_rps_threads = krealloc_type(struct fsw_rps_thread,
1930 		    fsw->fsw_rps_nthreads, n, fsw->fsw_rps_threads,
1931 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1932 	ASSERT((n != 0) ^ (fsw->fsw_rps_threads == NULL));
1933 	} else if (n > fsw->fsw_rps_nthreads) {
1934 		fsw->fsw_rps_threads = krealloc_type(struct fsw_rps_thread,
1935 		    fsw->fsw_rps_nthreads, n, fsw->fsw_rps_threads,
1936 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1937 		for (uint32_t i = fsw->fsw_rps_nthreads; i < n; i++) {
1938 			fsw_rps_thread_spawn(fsw, i);
1939 		}
1940 	}
1941 	fsw->fsw_rps_nthreads = n;
1942 	FSW_WUNLOCK(fsw);
1943 	return 0;
1944 }
1945 
1946 static uint32_t
1947 get_rps_id(struct nx_flowswitch *fsw, struct __kern_packet *pkt)
1948 {
1949 	sa_family_t af = fsw->fsw_demux(fsw, pkt);
1950 	if (__improbable(af == AF_UNSPEC)) {
1951 		return 0;
1952 	}
1953 
1954 	flow_pkt_classify(pkt, fsw->fsw_ifp, af, true);
1955 
1956 	if (__improbable((pkt->pkt_qum_qflags &
1957 	    QUM_F_FLOW_CLASSIFIED) == 0)) {
1958 		return 0;
1959 	}
1960 
1961 	struct flow_key key;
1962 	flow_pkt2key(pkt, true, &key);
1963 	key.fk_mask = FKMASK_5TUPLE;
1964 
1965 	uint32_t id = flow_key_hash(&key) % fsw->fsw_rps_nthreads;
1966 
1967 	return id;
1968 }
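
/*
 * get_rps_id() steers each classified packet to one of fsw_rps_nthreads
 * receive threads by hashing the 5-tuple flow key, so every packet of a
 * given flow lands on the same thread and per-flow ordering is preserved.
 * A minimal, self-contained sketch of the same steering rule (hypothetical
 * names; flow_key_hash() is the real hash used above):
 *
 *	struct tuple5 {
 *		uint32_t saddr, daddr;
 *		uint16_t sport, dport;
 *		uint8_t  proto;
 *	};
 *
 *	static uint32_t
 *	rps_pick_thread(const struct tuple5 *t, uint32_t nthreads)
 *	{
 *		uint32_t h = t->saddr ^ t->daddr ^ t->proto ^
 *		    (((uint32_t)t->sport << 16) | t->dport);	// stand-in hash
 *		return h % nthreads;	// same flow always maps to the same thread
 *	}
 */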
1969 
1970 #endif /* !DEVELOPMENT && !DEBUG */
1971 
1972 void
1973 fsw_receive(struct nx_flowswitch *fsw, struct pktq *pktq)
1974 {
1975 	FSW_RLOCK(fsw);
1976 #if (DEVELOPMENT || DEBUG)
1977 	if (fsw->fsw_rps_nthreads != 0) {
1978 		struct __kern_packet *pkt, *tpkt;
1979 		bitmap_t map = 0;
1980 
1981 		_CASSERT(BITMAP_LEN(FSW_RPS_MAX_NTHREADS) == 1);
1982 		KPKTQ_FOREACH_SAFE(pkt, pktq, tpkt) {
1983 			uint32_t id = get_rps_id(fsw, pkt);
1984 			KPKTQ_REMOVE(pktq, pkt);
1985 			fsw_rps_rx(fsw, id, pkt);
1986 			bitmap_set(&map, id);
1987 		}
1988 		for (int i = bitmap_first(&map, 64); i >= 0;
1989 		    i = bitmap_next(&map, i)) {
1990 			fsw_rps_thread_schedule(fsw, i);
1991 		}
1992 	} else
1993 #endif /* !DEVELOPMENT && !DEBUG */
1994 	{
1995 		_fsw_receive_locked(fsw, pktq);
1996 	}
1997 	FSW_RUNLOCK(fsw);
1998 }
1999 
2000 int
2001 fsw_dev_input_netem_dequeue(void *handle, pktsched_pkt_t *pkts,
2002     uint32_t n_pkts)
2003 {
2004 #pragma unused(handle)
2005 	struct nx_flowswitch *fsw = handle;
2006 	struct __kern_packet *kpkts[FSW_VP_DEV_BATCH_MAX];
2007 	struct pktq pktq;
2008 	sk_protect_t protect;
2009 	uint32_t i;
2010 
2011 	ASSERT(n_pkts <= FSW_VP_DEV_BATCH_MAX);
2012 
2013 	for (i = 0; i < n_pkts; i++) {
2014 		ASSERT(pkts[i].pktsched_ptype == QP_PACKET);
2015 		ASSERT(pkts[i].pktsched_pkt_kpkt != NULL);
2016 		kpkts[i] = pkts[i].pktsched_pkt_kpkt;
2017 	}
2018 
2019 	protect = sk_sync_protect();
2020 	KPKTQ_INIT(&pktq);
2021 	pkts_to_pktq(kpkts, n_pkts, &pktq);
2022 
2023 	fsw_receive(fsw, &pktq);
2024 	KPKTQ_FINI(&pktq);
2025 	sk_sync_unprotect(protect);
2026 
2027 	return 0;
2028 }
2029 
2030 static void
2031 fsw_dev_input_netem_enqueue(struct nx_flowswitch *fsw, struct pktq *q)
2032 {
2033 	classq_pkt_t p;
2034 	struct netem *ne;
2035 	struct __kern_packet *pkt, *tpkt;
2036 
2037 	ASSERT(fsw->fsw_ifp != NULL);
2038 	ne = fsw->fsw_ifp->if_input_netem;
2039 	ASSERT(ne != NULL);
2040 	KPKTQ_FOREACH_SAFE(pkt, q, tpkt) {
2041 		bool pdrop;
2042 		KPKTQ_REMOVE(q, pkt);
2043 		CLASSQ_PKT_INIT_PACKET(&p, pkt);
2044 		netem_enqueue(ne, &p, &pdrop);
2045 	}
2046 }
2047 
2048 void
2049 fsw_devna_rx(struct nexus_adapter *devna, struct __kern_packet *pkt_head,
2050     struct nexus_pkt_stats *out_stats)
2051 {
2052 	struct __kern_packet *pkt = pkt_head, *next;
2053 	struct nx_flowswitch *fsw;
2054 	uint32_t n_bytes = 0, n_pkts = 0;
2055 	uint64_t total_pkts = 0, total_bytes = 0;
2056 	struct pktq q;
2057 
2058 	KPKTQ_INIT(&q);
2059 	if (__improbable(devna->na_ifp == NULL ||
2060 	    (fsw = fsw_ifp_to_fsw(devna->na_ifp)) == NULL)) {
2061 		SK_ERR("fsw not attached, dropping pkts");
2062 		pp_free_packet_chain(pkt_head, NULL);
2063 		return;
2064 	}
2065 	while (pkt != NULL) {
2066 		if (__improbable(pkt->pkt_trace_id != 0)) {
2067 			KDBG(SK_KTRACE_PKT_RX_DRV | DBG_FUNC_END, pkt->pkt_trace_id);
2068 			KDBG(SK_KTRACE_PKT_RX_FSW | DBG_FUNC_START, pkt->pkt_trace_id);
2069 		}
2070 		next = pkt->pkt_nextpkt;
2071 		pkt->pkt_nextpkt = NULL;
2072 
2073 		if (__probable((pkt->pkt_qum_qflags & QUM_F_DROPPED) == 0)) {
2074 			KPKTQ_ENQUEUE(&q, pkt);
2075 			n_bytes += pkt->pkt_length;
2076 		} else {
2077 			DTRACE_SKYWALK1(non__finalized__drop,
2078 			    struct __kern_packet *, pkt);
2079 			FSW_STATS_INC(FSW_STATS_RX_PKT_NOT_FINALIZED);
2080 			pp_free_packet_single(pkt);
2081 			pkt = NULL;
2082 		}
2083 		n_pkts = KPKTQ_LEN(&q);
2084 		if (n_pkts == fsw_rx_batch || (next == NULL && n_pkts > 0)) {
2085 			if (__improbable(fsw->fsw_ifp->if_input_netem != NULL)) {
2086 				fsw_dev_input_netem_enqueue(fsw, &q);
2087 			} else {
2088 				fsw_receive(fsw, &q);
2089 			}
2090 			total_pkts += n_pkts;
2091 			total_bytes += n_bytes;
2092 			n_pkts = 0;
2093 			n_bytes = 0;
2094 			KPKTQ_FINI(&q);
2095 		}
2096 		pkt = next;
2097 	}
2098 	ASSERT(KPKTQ_LEN(&q) == 0);
2099 	FSW_STATS_ADD(FSW_STATS_RX_PACKETS, total_pkts);
2100 	if (out_stats != NULL) {
2101 		out_stats->nps_pkts = total_pkts;
2102 		out_stats->nps_bytes = total_bytes;
2103 	}
2104 	KDBG(SK_KTRACE_FSW_DEV_RING_FLUSH, SK_KVA(devna), total_pkts, total_bytes);
2105 }
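
/*
 * fsw_devna_rx() above turns the driver's packet chain into pktq batches of
 * at most fsw_rx_batch packets, so fsw_receive() amortizes the flowswitch
 * rlock, demux and flow lookups over many packets.  The loop, reduced to its
 * batching shape (hypothetical helper names):
 *
 *	while (pkt != NULL) {
 *		next = pkt->pkt_nextpkt;
 *		enqueue(&batch, pkt);			// accumulate
 *		if (len(&batch) == fsw_rx_batch ||
 *		    (next == NULL && len(&batch) > 0))
 *			deliver(&batch);		// flush a full or final batch
 *		pkt = next;
 *	}
 */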
2106 
2107 static int
2108 dp_copy_to_dev_mbuf(struct nx_flowswitch *fsw, struct __kern_packet *spkt,
2109     struct __kern_packet *dpkt)
2110 {
2111 	struct mbuf *m = NULL;
2112 	uint16_t bdlen, bdlim, bdoff;
2113 	uint8_t *bdaddr;
2114 	unsigned int one = 1;
2115 	int err = 0;
2116 
2117 	err = mbuf_allocpacket(MBUF_DONTWAIT,
2118 	    (fsw->fsw_frame_headroom + spkt->pkt_length), &one, &m);
2119 #if (DEVELOPMENT || DEBUG)
2120 	if (m != NULL) {
2121 		_FSW_INJECT_ERROR(11, m, NULL, m_freem, m);
2122 	}
2123 #endif /* DEVELOPMENT || DEBUG */
2124 	if (__improbable(m == NULL)) {
2125 		FSW_STATS_INC(FSW_STATS_DROP_NOMEM_MBUF);
2126 		err = ENOBUFS;
2127 		goto done;
2128 	}
2129 
2130 	MD_BUFLET_ADDR_ABS_DLEN(dpkt, bdaddr, bdlen, bdlim, bdoff);
2131 	if (fsw->fsw_frame_headroom > bdlim) {
2132 		SK_ERR("not enough space in buffer for headroom");
2133 		err = EINVAL;
2134 		goto done;
2135 	}
2136 
2137 	dpkt->pkt_headroom = fsw->fsw_frame_headroom;
2138 	dpkt->pkt_mbuf = m;
2139 	dpkt->pkt_pflags |= PKT_F_MBUF_DATA;
2140 
2141 	/* packet copy into mbuf */
2142 	fsw->fsw_pkt_copy_to_mbuf(NR_TX, SK_PTR_ENCODE(spkt,
2143 	    METADATA_TYPE(spkt), METADATA_SUBTYPE(spkt)), 0, m,
2144 	    fsw->fsw_frame_headroom, spkt->pkt_length,
2145 	    PACKET_HAS_PARTIAL_CHECKSUM(spkt),
2146 	    spkt->pkt_csum_tx_start_off);
2147 	FSW_STATS_INC(FSW_STATS_TX_COPY_PKT2MBUF);
2148 
2149 	/* header copy into dpkt buffer for classification */
2150 	kern_packet_t sph = SK_PTR_ENCODE(spkt,
2151 	    METADATA_TYPE(spkt), METADATA_SUBTYPE(spkt));
2152 	kern_packet_t dph = SK_PTR_ENCODE(dpkt,
2153 	    METADATA_TYPE(dpkt), METADATA_SUBTYPE(dpkt));
2154 	uint32_t copy_len = MIN(spkt->pkt_length, bdlim - dpkt->pkt_headroom);
2155 	fsw->fsw_pkt_copy_from_pkt(NR_TX, dph, dpkt->pkt_headroom,
2156 	    sph, spkt->pkt_headroom, copy_len, FALSE, 0, 0, 0);
2157 
2158 	/*
2159 	 * fsw->fsw_frame_headroom is after m_data, thus we treat m_data the same
2160 	 * as the buflet baddr; m_data always points to the beginning of the
2161 	 * packet and should represent the same location as baddr + headroom.
2162 	 */
2163 	ASSERT((uintptr_t)m->m_data ==
2164 	    ((uintptr_t)mbuf_datastart(m) + fsw->fsw_frame_headroom));
2165 
2166 done:
2167 	return err;
2168 }
2169 
2170 static int
2171 dp_copy_to_dev_pkt(struct nx_flowswitch *fsw, struct __kern_packet *spkt,
2172     struct __kern_packet *dpkt)
2173 {
2174 	struct ifnet *ifp = fsw->fsw_ifp;
2175 	uint16_t headroom = fsw->fsw_frame_headroom + ifp->if_tx_headroom;
2176 
2177 	if (headroom > UINT8_MAX) {
2178 		SK_ERR("headroom too large %d", headroom);
2179 		return ERANGE;
2180 	}
2181 	dpkt->pkt_headroom = (uint8_t)headroom;
2182 	ASSERT((dpkt->pkt_headroom & 0x7) == 0);
2183 	dpkt->pkt_l2_len = 0;
2184 	dpkt->pkt_link_flags = spkt->pkt_link_flags;
2185 
2186 	kern_packet_t sph = SK_PTR_ENCODE(spkt,
2187 	    METADATA_TYPE(spkt), METADATA_SUBTYPE(spkt));
2188 	kern_packet_t dph = SK_PTR_ENCODE(dpkt,
2189 	    METADATA_TYPE(dpkt), METADATA_SUBTYPE(dpkt));
2190 	fsw->fsw_pkt_copy_from_pkt(NR_TX, dph,
2191 	    dpkt->pkt_headroom, sph, spkt->pkt_headroom,
2192 	    spkt->pkt_length, PACKET_HAS_PARTIAL_CHECKSUM(spkt),
2193 	    (spkt->pkt_csum_tx_start_off - spkt->pkt_headroom),
2194 	    (spkt->pkt_csum_tx_stuff_off - spkt->pkt_headroom),
2195 	    (spkt->pkt_csum_flags & PACKET_CSUM_ZERO_INVERT));
2196 
2197 	FSW_STATS_INC(FSW_STATS_TX_COPY_PKT2PKT);
2198 
2199 	return 0;
2200 }
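
/*
 * For the native (QP_PACKET) path above, the payload is copied into the
 * device packet at an offset of fsw_frame_headroom + if_tx_headroom bytes so
 * the framer and driver can later prepend their headers in place.  The total
 * must fit the 8-bit pkt_headroom field and stay 8-byte aligned; e.g. with a
 * hypothetical frame headroom of 16 and driver Tx headroom of 16:
 *
 *	headroom = 16 + 16 = 32;	// <= UINT8_MAX, passes the ERANGE check
 *	ASSERT((32 & 0x7) == 0);	// satisfies the alignment assertion
 */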
2201 
2202 #if SK_LOG
2203 /* Hoisted out of line to reduce kernel stack footprint */
2204 SK_LOG_ATTRIBUTE
2205 static void
2206 dp_copy_to_dev_log(struct nx_flowswitch *fsw, const struct kern_pbufpool *pp,
2207     struct __kern_packet *spkt, struct __kern_packet *dpkt, int error)
2208 {
2209 	struct proc *p = current_proc();
2210 	struct ifnet *ifp = fsw->fsw_ifp;
2211 	uint64_t logflags = (SK_VERB_FSW_DP | SK_VERB_TX);
2212 
2213 	if (error == ERANGE) {
2214 		SK_ERR("packet too long, hr(fr+tx)+slen (%u+%u)+%u > "
2215 		    "dev_pp_max %u", (uint32_t)fsw->fsw_frame_headroom,
2216 		    (uint32_t)ifp->if_tx_headroom, spkt->pkt_length,
2217 		    (uint32_t)pp->pp_max_frags * PP_BUF_SIZE_DEF(pp));
2218 	} else if (error == ENOBUFS) {
2219 		SK_DF(logflags, "%s(%d) packet allocation failure",
2220 		    sk_proc_name_address(p), sk_proc_pid(p));
2221 	} else if (error == 0) {
2222 		ASSERT(dpkt != NULL);
2223 		char *daddr;
2224 		MD_BUFLET_ADDR_ABS(dpkt, daddr);
2225 		SK_DF(logflags, "%s(%d) splen %u dplen %u hr %u (fr/tx %u/%u)",
2226 		    sk_proc_name_address(p), sk_proc_pid(p), spkt->pkt_length,
2227 		    dpkt->pkt_length, (uint32_t)dpkt->pkt_headroom,
2228 		    (uint32_t)fsw->fsw_frame_headroom,
2229 		    (uint32_t)ifp->if_tx_headroom);
2230 		SK_DF(logflags | SK_VERB_DUMP, "%s",
2231 		    sk_dump("buf", daddr, dpkt->pkt_length, 128, NULL, 0));
2232 	} else {
2233 		SK_DF(logflags, "%s(%d) error %d", sk_proc_name_address(p), sk_proc_pid(p), error);
2234 	}
2235 }
2236 #else
2237 #define dp_copy_to_dev_log(...)
2238 #endif /* SK_LOG */
2239 
2240 static int
2241 dp_copy_to_dev(struct nx_flowswitch *fsw, struct __kern_packet *spkt,
2242     struct __kern_packet *dpkt)
2243 {
2244 	const struct kern_pbufpool *pp = dpkt->pkt_qum.qum_pp;
2245 	struct ifnet *ifp = fsw->fsw_ifp;
2246 	uint32_t dev_pkt_len;
2247 	int err = 0;
2248 
2249 	ASSERT(!(spkt->pkt_pflags & PKT_F_MBUF_MASK));
2250 	ASSERT(!(spkt->pkt_pflags & PKT_F_PKT_MASK));
2251 
2252 	SK_PREFETCHW(dpkt->pkt_qum_buf.buf_addr, 0);
2253 	/* Copy packet metadata */
2254 	_QUM_COPY(&(spkt)->pkt_qum, &(dpkt)->pkt_qum);
2255 	_PKT_COPY(spkt, dpkt);
2256 	_PKT_COPY_TX_PORT_DATA(spkt, dpkt);
2257 	ASSERT((dpkt->pkt_qum.qum_qflags & QUM_F_KERNEL_ONLY) ||
2258 	    !PP_KERNEL_ONLY(dpkt->pkt_qum.qum_pp));
2259 	ASSERT(dpkt->pkt_mbuf == NULL);
2260 
2261 	/* Copy AQM metadata */
2262 	dpkt->pkt_flowsrc_type = spkt->pkt_flowsrc_type;
2263 	dpkt->pkt_flowsrc_fidx = spkt->pkt_flowsrc_fidx;
2264 	_CASSERT((offsetof(struct __flow, flow_src_id) % 8) == 0);
2265 	_UUID_COPY(dpkt->pkt_flowsrc_id, spkt->pkt_flowsrc_id);
2266 	_UUID_COPY(dpkt->pkt_policy_euuid, spkt->pkt_policy_euuid);
2267 	dpkt->pkt_policy_id = spkt->pkt_policy_id;
2268 
2269 	switch (fsw->fsw_classq_enq_ptype) {
2270 	case QP_MBUF:
2271 		err = dp_copy_to_dev_mbuf(fsw, spkt, dpkt);
2272 		break;
2273 
2274 	case QP_PACKET:
2275 		dev_pkt_len = fsw->fsw_frame_headroom + ifp->if_tx_headroom +
2276 		    spkt->pkt_length;
2277 		if (dev_pkt_len > pp->pp_max_frags * PP_BUF_SIZE_DEF(pp)) {
2278 			FSW_STATS_INC(FSW_STATS_TX_COPY_BAD_LEN);
2279 			err = ERANGE;
2280 			goto done;
2281 		}
2282 		err = dp_copy_to_dev_pkt(fsw, spkt, dpkt);
2283 		break;
2284 
2285 	default:
2286 		VERIFY(0);
2287 		__builtin_unreachable();
2288 	}
2289 done:
2290 	dp_copy_to_dev_log(fsw, pp, spkt, dpkt, err);
2291 	return err;
2292 }
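
/*
 * Before copying into a native device packet, dp_copy_to_dev() checks that
 * the frame fits the device pool's buffer budget:
 * headroom + payload <= pp_max_frags * PP_BUF_SIZE_DEF(pp).  For example,
 * with hypothetical 2 KB buffers and a 4-buflet max:
 *
 *	capacity = 4 * 2048 = 8192 bytes;
 *	dev_pkt_len = 32 + 9000 = 9032 > 8192	-> ERANGE, packet is dropped
 */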
2293 
2294 static struct mbuf *
2295 convert_pkt_to_mbuf(struct __kern_packet *pkt)
2296 {
2297 	ASSERT(pkt->pkt_pflags & PKT_F_MBUF_DATA);
2298 	ASSERT(pkt->pkt_mbuf != NULL);
2299 	struct mbuf *m = pkt->pkt_mbuf;
2300 
2301 	/* pass additional metadata generated from flow parse/lookup */
2302 	_CASSERT(sizeof(m->m_pkthdr.pkt_flowid) ==
2303 	    sizeof(pkt->pkt_flow_token));
2304 	_CASSERT(sizeof(m->m_pkthdr.pkt_mpriv_srcid) ==
2305 	    sizeof(pkt->pkt_flowsrc_token));
2306 	_CASSERT(sizeof(m->m_pkthdr.pkt_mpriv_fidx) ==
2307 	    sizeof(pkt->pkt_flowsrc_fidx));
2308 	m->m_pkthdr.pkt_svc = pkt->pkt_svc_class;
2309 	m->m_pkthdr.pkt_proto = pkt->pkt_flow->flow_ip_proto;
2310 	m->m_pkthdr.pkt_flowid = pkt->pkt_flow_token;
2311 	m->m_pkthdr.comp_gencnt = pkt->pkt_comp_gencnt;
2312 	m->m_pkthdr.pkt_flowsrc = pkt->pkt_flowsrc_type;
2313 	m->m_pkthdr.pkt_mpriv_srcid = pkt->pkt_flowsrc_token;
2314 	m->m_pkthdr.pkt_mpriv_fidx = pkt->pkt_flowsrc_fidx;
2315 
2316 	/* The packet should have a timestamp by the time we get here. */
2317 	m->m_pkthdr.pkt_timestamp = pkt->pkt_timestamp;
2318 	m->m_pkthdr.pkt_flags &= ~PKTF_TS_VALID;
2319 
2320 	m->m_pkthdr.pkt_flags &= ~PKT_F_COMMON_MASK;
2321 	m->m_pkthdr.pkt_flags |= (pkt->pkt_pflags & PKT_F_COMMON_MASK);
2322 	if ((pkt->pkt_pflags & PKT_F_START_SEQ) != 0) {
2323 		m->m_pkthdr.tx_start_seq = ntohl(pkt->pkt_flow_tcp_seq);
2324 	}
2325 	KPKT_CLEAR_MBUF_DATA(pkt);
2326 
2327 	/* mbuf has been consumed, release packet as well */
2328 	ASSERT(pkt->pkt_qum.qum_ksd == NULL);
2329 	pp_free_packet_single(pkt);
2330 	return m;
2331 }
2332 
2333 static void
2334 convert_pkt_to_mbuf_list(struct __kern_packet *pkt_list,
2335     struct mbuf **head, struct mbuf **tail,
2336     uint32_t *cnt, uint32_t *bytes)
2337 {
2338 	struct __kern_packet *pkt = pkt_list, *next;
2339 	struct mbuf *m_head = NULL, **m_tailp = &m_head, *m = NULL;
2340 	uint32_t c = 0, b = 0;
2341 
2342 	while (pkt != NULL) {
2343 		next = pkt->pkt_nextpkt;
2344 		pkt->pkt_nextpkt = NULL;
2345 		m = convert_pkt_to_mbuf(pkt);
2346 		ASSERT(m != NULL);
2347 
2348 		*m_tailp = m;
2349 		m_tailp = &m->m_nextpkt;
2350 		c++;
2351 		b += m_pktlen(m);
2352 		pkt = next;
2353 	}
2354 	if (head != NULL) {
2355 		*head = m_head;
2356 	}
2357 	if (tail != NULL) {
2358 		*tail = m;
2359 	}
2360 	if (cnt != NULL) {
2361 		*cnt = c;
2362 	}
2363 	if (bytes != NULL) {
2364 		*bytes = b;
2365 	}
2366 }
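
/*
 * convert_pkt_to_mbuf_list() builds the mbuf chain with the
 * pointer-to-tail-pointer idiom: *m_tailp always names the link through
 * which the next element should be stored, so appends are O(1) and need no
 * "empty list" special case.  The idiom in its generic form (hypothetical
 * node type):
 *
 *	struct node *head = NULL, **tailp = &head;
 *	while ((n = produce_next()) != NULL) {
 *		*tailp = n;		// link the new node at the current tail
 *		tailp = &n->next;	// tail link is now the node's next field
 *	}
 */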
2367 
2368 SK_NO_INLINE_ATTRIBUTE
2369 static int
2370 classq_enqueue_flow_single(struct nx_flowswitch *fsw,
2371     struct __kern_packet *pkt)
2372 {
2373 	struct ifnet *ifp = fsw->fsw_ifp;
2374 	boolean_t pkt_drop = FALSE;
2375 	int err;
2376 
2377 	FSW_LOCK_ASSERT_HELD(fsw);
2378 	ASSERT(fsw->fsw_classq_enabled);
2379 	ASSERT(pkt->pkt_flow_token != 0);
2380 	fsw_ifp_inc_traffic_class_out_pkt(ifp, pkt->pkt_svc_class,
2381 	    1, pkt->pkt_length);
2382 
2383 	if (__improbable(pkt->pkt_trace_id != 0)) {
2384 		KDBG(SK_KTRACE_PKT_TX_FSW | DBG_FUNC_END, pkt->pkt_trace_id);
2385 		KDBG(SK_KTRACE_PKT_TX_AQM | DBG_FUNC_START, pkt->pkt_trace_id);
2386 	}
2387 
2388 	switch (fsw->fsw_classq_enq_ptype) {
2389 	case QP_MBUF: {                         /* compat interface */
2390 		struct mbuf *m;
2391 
2392 		m = convert_pkt_to_mbuf(pkt);
2393 		ASSERT(m != NULL);
2394 		pkt = NULL;
2395 
2396 		/* ifnet_enqueue consumes mbuf */
2397 		err = ifnet_enqueue_mbuf(ifp, m, false, &pkt_drop);
2398 		m = NULL;
2399 #if (DEVELOPMENT || DEBUG)
2400 		if (__improbable(!pkt_drop)) {
2401 			_FSW_INJECT_ERROR(14, pkt_drop, TRUE, null_func);
2402 		}
2403 #endif /* DEVELOPMENT || DEBUG */
2404 		if (pkt_drop) {
2405 			FSW_STATS_INC(FSW_STATS_DROP);
2406 			FSW_STATS_INC(FSW_STATS_TX_AQM_DROP);
2407 		}
2408 		break;
2409 	}
2410 	case QP_PACKET: {                       /* native interface */
2411 		/* ifnet_enqueue consumes packet */
2412 		err = ifnet_enqueue_pkt(ifp, pkt, false, &pkt_drop);
2413 		pkt = NULL;
2414 #if (DEVELOPMENT || DEBUG)
2415 		if (__improbable(!pkt_drop)) {
2416 			_FSW_INJECT_ERROR(14, pkt_drop, TRUE, null_func);
2417 		}
2418 #endif /* DEVELOPMENT || DEBUG */
2419 		if (pkt_drop) {
2420 			FSW_STATS_INC(FSW_STATS_DROP);
2421 			FSW_STATS_INC(FSW_STATS_TX_AQM_DROP);
2422 		}
2423 		break;
2424 	}
2425 	default:
2426 		err = EINVAL;
2427 		VERIFY(0);
2428 		/* NOTREACHED */
2429 		__builtin_unreachable();
2430 	}
2431 
2432 	return err;
2433 }
2434 
2435 static int
2436 classq_enqueue_flow_chain(struct nx_flowswitch *fsw,
2437     struct __kern_packet *pkt_head, struct __kern_packet *pkt_tail,
2438     uint32_t cnt, uint32_t bytes)
2439 {
2440 	struct ifnet *ifp = fsw->fsw_ifp;
2441 	boolean_t pkt_drop = FALSE;
2442 	uint32_t svc;
2443 	int err;
2444 
2445 	FSW_LOCK_ASSERT_HELD(fsw);
2446 	ASSERT(fsw->fsw_classq_enabled);
2447 	ASSERT(pkt_head->pkt_flow_token != 0);
2448 
2449 	/*
2450 	 * All packets in the flow should have the same svc.
2451 	 */
2452 	svc = pkt_head->pkt_svc_class;
2453 	fsw_ifp_inc_traffic_class_out_pkt(ifp, svc, cnt, bytes);
2454 
2455 	switch (fsw->fsw_classq_enq_ptype) {
2456 	case QP_MBUF: {                         /* compat interface */
2457 		struct mbuf *m_head = NULL, *m_tail = NULL;
2458 		uint32_t c = 0, b = 0;
2459 
2460 		convert_pkt_to_mbuf_list(pkt_head, &m_head, &m_tail, &c, &b);
2461 		ASSERT(m_head != NULL && m_tail != NULL);
2462 		ASSERT(c == cnt);
2463 		ASSERT(b == bytes);
2464 		pkt_head = NULL;
2465 
2466 		/* ifnet_enqueue consumes mbuf */
2467 		err = ifnet_enqueue_mbuf_chain(ifp, m_head, m_tail, cnt,
2468 		    bytes, FALSE, &pkt_drop);
2469 		m_head = NULL;
2470 		m_tail = NULL;
2471 #if (DEVELOPMENT || DEBUG)
2472 		if (__improbable(!pkt_drop)) {
2473 			_FSW_INJECT_ERROR(14, pkt_drop, TRUE, null_func);
2474 		}
2475 #endif /* DEVELOPMENT || DEBUG */
2476 		if (pkt_drop) {
2477 			STATS_ADD(&fsw->fsw_stats, FSW_STATS_DROP, cnt);
2478 			STATS_ADD(&fsw->fsw_stats, FSW_STATS_TX_AQM_DROP,
2479 			    cnt);
2480 		}
2481 		break;
2482 	}
2483 	case QP_PACKET: {                       /* native interface */
2484 		/* ifnet_enqueue consumes packet */
2485 		err = ifnet_enqueue_pkt_chain(ifp, pkt_head, pkt_tail, cnt,
2486 		    bytes, FALSE, &pkt_drop);
2487 		pkt_head = NULL;
2488 #if (DEVELOPMENT || DEBUG)
2489 		if (__improbable(!pkt_drop)) {
2490 			_FSW_INJECT_ERROR(14, pkt_drop, TRUE, null_func);
2491 		}
2492 #endif /* DEVELOPMENT || DEBUG */
2493 		if (pkt_drop) {
2494 			STATS_ADD(&fsw->fsw_stats, FSW_STATS_DROP, cnt);
2495 			STATS_ADD(&fsw->fsw_stats, FSW_STATS_TX_AQM_DROP,
2496 			    cnt);
2497 		}
2498 		break;
2499 	}
2500 	default:
2501 		err = EINVAL;
2502 		VERIFY(0);
2503 		/* NOTREACHED */
2504 		__builtin_unreachable();
2505 	}
2506 
2507 	return err;
2508 }
2509 
2510 /*
2511  * This code path needs to be kept for interfaces without logical link support.
2512  */
2513 static void
2514 classq_enqueue_flow(struct nx_flowswitch *fsw, struct flow_entry *fe,
2515     bool chain, uint32_t cnt, uint32_t bytes)
2516 {
2517 	bool flowadv_is_set = false;
2518 	struct __kern_packet *pkt, *tail, *tpkt;
2519 	flowadv_idx_t flow_adv_idx;
2520 	bool flowadv_cap;
2521 	flowadv_token_t flow_adv_token;
2522 	int err;
2523 
2524 	SK_DF(SK_VERB_FSW_DP | SK_VERB_AQM, "%s classq enqueued %d pkts",
2525 	    if_name(fsw->fsw_ifp), KPKTQ_LEN(&fe->fe_tx_pktq));
2526 
2527 	if (chain) {
2528 		pkt = KPKTQ_FIRST(&fe->fe_tx_pktq);
2529 		tail = KPKTQ_LAST(&fe->fe_tx_pktq);
2530 		KPKTQ_INIT(&fe->fe_tx_pktq);
2531 		if (pkt == NULL) {
2532 			return;
2533 		}
2534 		flow_adv_idx = pkt->pkt_flowsrc_fidx;
2535 		flowadv_cap = ((pkt->pkt_pflags & PKT_F_FLOW_ADV) != 0);
2536 		flow_adv_token = pkt->pkt_flow_token;
2537 
2538 		err = classq_enqueue_flow_chain(fsw, pkt, tail, cnt, bytes);
2539 
2540 		/* set flow advisory if needed */
2541 		if (__improbable((err == EQFULL || err == EQSUSPENDED) &&
2542 		    flowadv_cap)) {
2543 			flowadv_is_set = na_flowadv_set(flow_get_na(fsw, fe),
2544 			    flow_adv_idx, flow_adv_token);
2545 		}
2546 	} else {
2547 		uint32_t c = 0, b = 0;
2548 
2549 		KPKTQ_FOREACH_SAFE(pkt, &fe->fe_tx_pktq, tpkt) {
2550 			KPKTQ_REMOVE(&fe->fe_tx_pktq, pkt);
2551 
2552 			flow_adv_idx = pkt->pkt_flowsrc_fidx;
2553 			flowadv_cap = ((pkt->pkt_pflags & PKT_F_FLOW_ADV) != 0);
2554 			flow_adv_token = pkt->pkt_flow_token;
2555 
2556 			c++;
2557 			b += pkt->pkt_length;
2558 			err = classq_enqueue_flow_single(fsw, pkt);
2559 
2560 			/* set flow advisory if needed */
2561 			if (__improbable(!flowadv_is_set &&
2562 			    ((err == EQFULL || err == EQSUSPENDED) &&
2563 			    flowadv_cap))) {
2564 				flowadv_is_set = na_flowadv_set(
2565 					flow_get_na(fsw, fe), flow_adv_idx,
2566 					flow_adv_token);
2567 			}
2568 		}
2569 		ASSERT(c == cnt);
2570 		ASSERT(b == bytes);
2571 	}
2572 
2573 	/* notify flow advisory event */
2574 	if (__improbable(flowadv_is_set)) {
2575 		struct __kern_channel_ring *r = fsw_flow_get_tx_ring(fsw, fe);
2576 		if (__probable(r)) {
2577 			na_flowadv_event(r);
2578 			SK_DF(SK_VERB_FLOW_ADVISORY | SK_VERB_TX,
2579 			    "%s(%d) notified of flow update",
2580 			    sk_proc_name_address(current_proc()),
2581 			    sk_proc_pid(current_proc()));
2582 		}
2583 	}
2584 }
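
/*
 * Flow advisory in the enqueue path above, in short: when AQM reports EQFULL
 * or EQSUSPENDED for a flow-advisory-capable packet, the flow's advisory slot
 * is set and the owning channel ring is notified so the user-space sender
 * backs off until the queue drains.  Reduced to its essentials, using the
 * calls made above:
 *
 *	err = classq_enqueue_flow_single(fsw, pkt);
 *	if ((err == EQFULL || err == EQSUSPENDED) && flowadv_cap) {
 *		if (na_flowadv_set(flow_get_na(fsw, fe), flow_adv_idx,
 *		    flow_adv_token))
 *			na_flowadv_event(fsw_flow_get_tx_ring(fsw, fe));
 *	}
 */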
2585 
2586 /*
2587  * Logical link code path
2588  */
2589 static void
2590 classq_qset_enqueue_flow(struct nx_flowswitch *fsw, struct flow_entry *fe,
2591     bool chain, uint32_t cnt, uint32_t bytes)
2592 {
2593 	struct __kern_packet *pkt, *tail;
2594 	flowadv_idx_t flow_adv_idx;
2595 	bool flowadv_is_set = false;
2596 	bool flowadv_cap;
2597 	flowadv_token_t flow_adv_token;
2598 	uint32_t flowctl = 0, dropped = 0;
2599 	int err;
2600 
2601 	SK_DF(SK_VERB_FSW_DP | SK_VERB_AQM, "%s classq enqueued %d pkts",
2602 	    if_name(fsw->fsw_ifp), KPKTQ_LEN(&fe->fe_tx_pktq));
2603 
2604 	/*
2605 	 * Not supporting chains for now
2606 	 */
2607 	VERIFY(!chain);
2608 	pkt = KPKTQ_FIRST(&fe->fe_tx_pktq);
2609 	tail = KPKTQ_LAST(&fe->fe_tx_pktq);
2610 	KPKTQ_INIT(&fe->fe_tx_pktq);
2611 	if (pkt == NULL) {
2612 		return;
2613 	}
2614 	flow_adv_idx = pkt->pkt_flowsrc_fidx;
2615 	flowadv_cap = ((pkt->pkt_pflags & PKT_F_FLOW_ADV) != 0);
2616 	flow_adv_token = pkt->pkt_flow_token;
2617 
2618 	err = netif_qset_enqueue(fe->fe_qset, pkt, tail, cnt, bytes,
2619 	    &flowctl, &dropped);
2620 
2621 	if (__improbable(err != 0)) {
2622 		/* set flow advisory if needed */
2623 		if (flowctl > 0 && flowadv_cap) {
2624 			flowadv_is_set = na_flowadv_set(flow_get_na(fsw, fe),
2625 			    flow_adv_idx, flow_adv_token);
2626 
2627 			/* notify flow advisory event */
2628 			if (flowadv_is_set) {
2629 				struct __kern_channel_ring *r =
2630 				    fsw_flow_get_tx_ring(fsw, fe);
2631 				if (__probable(r)) {
2632 					na_flowadv_event(r);
2633 					SK_DF(SK_VERB_FLOW_ADVISORY |
2634 					    SK_VERB_TX,
2635 					    "%s(%d) notified of flow update",
2636 					    sk_proc_name_address(current_proc()),
2637 					    sk_proc_pid(current_proc()));
2638 				}
2639 			}
2640 		}
2641 		if (dropped > 0) {
2642 			STATS_ADD(&fsw->fsw_stats, FSW_STATS_DROP, dropped);
2643 			STATS_ADD(&fsw->fsw_stats, FSW_STATS_TX_AQM_DROP,
2644 			    dropped);
2645 		}
2646 	}
2647 }
2648 
2649 static void
2650 tx_finalize_packet(struct nx_flowswitch *fsw, struct __kern_packet *pkt)
2651 {
2652 #pragma unused(fsw)
2653 	/* finalize here; no more changes to buflets after classq */
2654 	if (__probable(!(pkt->pkt_pflags & PKT_F_MBUF_DATA))) {
2655 		kern_packet_t ph = SK_PTR_ENCODE(pkt,
2656 		    METADATA_TYPE(pkt), METADATA_SUBTYPE(pkt));
2657 		int err = __packet_finalize(ph);
2658 		VERIFY(err == 0);
2659 	}
2660 }
2661 
2662 static bool
2663 dp_flow_tx_route_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
2664 {
2665 	struct flow_route *fr = fe->fe_route;
2666 	int err;
2667 
2668 	ASSERT(fr != NULL);
2669 
2670 	if (__improbable(!dp_flow_route_process(fsw, fe))) {
2671 		return false;
2672 	}
2673 	if (fe->fe_qset_select == FE_QSET_SELECT_DYNAMIC) {
2674 		flow_qset_select_dynamic(fsw, fe, TRUE);
2675 	}
2676 
2677 	_FSW_INJECT_ERROR(35, fr->fr_flags, fr->fr_flags,
2678 	    _fsw_error35_handler, 1, fr, NULL, NULL);
2679 	_FSW_INJECT_ERROR(36, fr->fr_flags, fr->fr_flags,
2680 	    _fsw_error36_handler, 1, fr, NULL);
2681 
2682 	/*
2683 	 * See if we need to resolve the flow route; note the test against
2684 	 * fr_flags here is done without any lock for performance.  Thus
2685 	 * it's possible that we race against the thread performing route
2686 	 * event updates for a packet (which is OK).  In any case we should
2687 	 * not have any assertion on fr_flags value(s) due to the lack of
2688 	 * serialization.
2689 	 */
2690 	if (fr->fr_flags & FLOWRTF_RESOLVED) {
2691 		goto frame;
2692 	}
2693 
2694 	struct __kern_packet *pkt, *tpkt;
2695 	KPKTQ_FOREACH_SAFE(pkt, &fe->fe_tx_pktq, tpkt) {
2696 		err = fsw->fsw_resolve(fsw, fr, pkt);
2697 		_FSW_INJECT_ERROR_SET(35, _fsw_error35_handler, 2, fr, pkt, &err);
2698 		_FSW_INJECT_ERROR_SET(36, _fsw_error36_handler, 2, fr, &err);
2699 		/*
2700 		 * If the resolver returns EJUSTRETURN, we drop the pkt, as the
2701 		 * resolver should have converted the pkt into an mbuf (or
2702 		 * detached the attached mbuf from the pkt) and added it to the
2703 		 * llinfo queue. If we do have a cached llinfo, then proceed
2704 		 * to use it even though it may be stale (very unlikely)
2705 		 * while the resolution is in progress.
2706 		 * Any other error results in dropping the pkt.
2707 		 */
2708 		if (err == EJUSTRETURN) {
2709 			KPKTQ_REMOVE(&fe->fe_tx_pktq, pkt);
2710 			pp_free_packet_single(pkt);
2711 			FSW_STATS_INC(FSW_STATS_TX_RESOLV_PENDING);
2712 			continue;
2713 		} else if (err != 0 && (fr->fr_flags & FLOWRTF_HAS_LLINFO)) {
2714 			/* use existing llinfo */
2715 			FSW_STATS_INC(FSW_STATS_TX_RESOLV_STALE);
2716 		} else if (err != 0) {
2717 			KPKTQ_REMOVE(&fe->fe_tx_pktq, pkt);
2718 			pp_free_packet_single(pkt);
2719 			FSW_STATS_INC(FSW_STATS_TX_RESOLV_FAIL);
2720 			continue;
2721 		}
2722 	}
2723 
2724 frame:
2725 	KPKTQ_FOREACH_SAFE(pkt, &fe->fe_tx_pktq, tpkt) {
2726 		if (fsw->fsw_frame != NULL) {
2727 			fsw->fsw_frame(fsw, fr, pkt);
2728 		}
2729 	}
2730 
2731 	return true;
2732 }
2733 
2734 static void
2735 dp_listener_flow_tx_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
2736 {
2737 #pragma unused(fsw)
2738 	struct __kern_packet *pkt, *tpkt;
2739 	KPKTQ_FOREACH_SAFE(pkt, &fe->fe_tx_pktq, tpkt) {
2740 		KPKTQ_REMOVE(&fe->fe_tx_pktq, pkt);
2741 		/* listener is only allowed TCP RST */
2742 		if (pkt->pkt_flow_ip_proto == IPPROTO_TCP &&
2743 		    (pkt->pkt_flow_tcp_flags & TH_RST) != 0) {
2744 			flow_track_abort_tcp(fe, NULL, pkt);
2745 		} else {
2746 			char *addr;
2747 			MD_BUFLET_ADDR_ABS(pkt, addr);
2748 			SK_ERR("listener flow sends non-RST packet %s",
2749 			    sk_dump(sk_proc_name_address(current_proc()),
2750 			    addr, pkt->pkt_length, 128, NULL, 0));
2751 		}
2752 		pp_free_packet_single(pkt);
2753 	}
2754 }
2755 
2756 static void
2757 fsw_update_timestamps(struct __kern_packet *pkt, volatile uint64_t *fg_ts,
2758     volatile uint64_t *rt_ts, ifnet_t ifp)
2759 {
2760 	struct timespec now;
2761 	uint64_t now_nsec = 0;
2762 
2763 	if (!(pkt->pkt_pflags & PKT_F_TS_VALID) || pkt->pkt_timestamp == 0) {
2764 		nanouptime(&now);
2765 		net_timernsec(&now, &now_nsec);
2766 		pkt->pkt_timestamp = now_nsec;
2767 	}
2768 	pkt->pkt_pflags &= ~PKT_F_TS_VALID;
2769 
2770 	/*
2771 	 * If the packet service class is not background,
2772 	 * update the timestamps on the interface, as well as
2773 	 * the ones in nexus-wide advisory to indicate recent
2774 	 * activity on a foreground flow.
2775 	 */
2776 	if (!(pkt->pkt_pflags & PKT_F_BACKGROUND)) {
2777 		ifp->if_fg_sendts = (uint32_t)_net_uptime;
2778 		if (fg_ts != NULL) {
2779 			*fg_ts = _net_uptime;
2780 		}
2781 	}
2782 	if (pkt->pkt_pflags & PKT_F_REALTIME) {
2783 		ifp->if_rt_sendts = (uint32_t)_net_uptime;
2784 		if (rt_ts != NULL) {
2785 			*rt_ts = _net_uptime;
2786 		}
2787 	}
2788 }
2789 
2790 /*
2791  * TODO:
2792  * We can check the flow entry as well to only allow chain enqueue
2793  * on flows matching certain criteria.
2794  */
2795 static bool
2796 fsw_chain_enqueue_enabled(struct nx_flowswitch *fsw, struct flow_entry *fe)
2797 {
2798 #pragma unused(fe)
2799 	return fsw_chain_enqueue != 0 &&
2800 	       fsw->fsw_ifp->if_output_netem == NULL &&
2801 	       (fsw->fsw_ifp->if_eflags & IFEF_ENQUEUE_MULTI) == 0 &&
2802 	       fe->fe_qset == NULL;
2803 }
2804 
2805 void
2806 dp_flow_tx_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
2807 {
2808 	struct pktq dropped_pkts;
2809 	bool chain;
2810 	uint32_t cnt = 0, bytes = 0;
2811 	volatile struct sk_nexusadv *nxadv = NULL;
2812 	volatile uint64_t *fg_ts = NULL;
2813 	volatile uint64_t *rt_ts = NULL;
2814 	uint8_t qset_idx = (fe->fe_qset != NULL) ? fe->fe_qset->nqs_idx : 0;
2815 
2816 	KPKTQ_INIT(&dropped_pkts);
2817 	ASSERT(!KPKTQ_EMPTY(&fe->fe_tx_pktq));
2818 	if (__improbable(fe->fe_flags & FLOWENTF_LISTENER)) {
2819 		dp_listener_flow_tx_process(fsw, fe);
2820 		return;
2821 	}
2822 	if (__improbable(!dp_flow_tx_route_process(fsw, fe))) {
2823 		SK_RDERR(5, "Tx route bad");
2824 		FSW_STATS_ADD(FSW_STATS_TX_FLOW_NONVIABLE,
2825 		    KPKTQ_LEN(&fe->fe_tx_pktq));
2826 		KPKTQ_CONCAT(&dropped_pkts, &fe->fe_tx_pktq);
2827 		goto done;
2828 	}
2829 	chain = fsw_chain_enqueue_enabled(fsw, fe);
2830 	if (chain) {
2831 		nxadv = fsw->fsw_nx->nx_adv.flowswitch_nxv_adv;
2832 		if (nxadv != NULL) {
2833 			fg_ts = &nxadv->nxadv_fg_sendts;
2834 			rt_ts = &nxadv->nxadv_rt_sendts;
2835 		}
2836 	}
2837 	struct __kern_packet *pkt, *tpkt;
2838 	KPKTQ_FOREACH_SAFE(pkt, &fe->fe_tx_pktq, tpkt) {
2839 		int err = flow_pkt_track(fe, pkt, false);
2840 		if (__improbable(err != 0)) {
2841 			SK_RDERR(5, "flow_pkt_track failed (err %d)", err);
2842 			FSW_STATS_INC(FSW_STATS_TX_FLOW_TRACK_ERR);
2843 			KPKTQ_REMOVE(&fe->fe_tx_pktq, pkt);
2844 			KPKTQ_ENQUEUE(&dropped_pkts, pkt);
2845 			continue;
2846 		}
2847 
2848 		_UUID_COPY(pkt->pkt_policy_euuid, fe->fe_eproc_uuid);
2849 		pkt->pkt_transport_protocol = fe->fe_transport_protocol;
2850 
2851 		/* set AQM related values for outgoing packet */
2852 		if (fe->fe_adv_idx != FLOWADV_IDX_NONE) {
2853 			pkt->pkt_pflags |= PKT_F_FLOW_ADV;
2854 			pkt->pkt_flowsrc_type = FLOWSRC_CHANNEL;
2855 			pkt->pkt_flowsrc_fidx = fe->fe_adv_idx;
2856 		} else {
2857 			pkt->pkt_pflags &= ~PKT_F_FLOW_ADV;
2858 		}
2859 		_UUID_CLEAR(pkt->pkt_flow_id);
2860 		pkt->pkt_flow_token = fe->fe_flowid;
2861 		pkt->pkt_pflags |= PKT_F_FLOW_ID;
2862 		pkt->pkt_qset_idx = qset_idx;
2863 		/*
2864 		 * The same code is exercised per packet for the non-chain case
2865 		 * (see ifnet_enqueue_ifclassq()). It's replicated here to avoid
2866 		 * re-walking the chain later.
2867 		 */
2868 		if (chain) {
2869 			fsw_update_timestamps(pkt, fg_ts, rt_ts, fsw->fsw_ifp);
2870 		}
2871 		/* mark packet tos/svc_class */
2872 		fsw_qos_mark(fsw, fe, pkt);
2873 
2874 		tx_finalize_packet(fsw, pkt);
2875 		bytes += pkt->pkt_length;
2876 		cnt++;
2877 	}
2878 
2879 	/* snoop after it's finalized */
2880 	if (__improbable(pktap_total_tap_count != 0)) {
2881 		fsw_snoop(fsw, fe, false);
2882 	}
2883 	if (fe->fe_qset != NULL) {
2884 		classq_qset_enqueue_flow(fsw, fe, chain, cnt, bytes);
2885 	} else {
2886 		classq_enqueue_flow(fsw, fe, chain, cnt, bytes);
2887 	}
2888 done:
2889 	dp_drop_pktq(fsw, &dropped_pkts);
2890 }
2891 
2892 static struct flow_entry *
2893 tx_process_continuous_ip_frag(struct nx_flowswitch *fsw,
2894     struct flow_entry *prev_fe, struct __kern_packet *pkt)
2895 {
2896 	ASSERT(!pkt->pkt_flow_ip_is_first_frag);
2897 
2898 	if (__improbable(pkt->pkt_flow_ip_frag_id == 0)) {
2899 		FSW_STATS_INC(FSW_STATS_TX_FRAG_BAD_ID);
2900 		SK_ERR("%s(%d) invalid zero fragment id",
2901 		    sk_proc_name_address(current_proc()),
2902 		    sk_proc_pid(current_proc()));
2903 		return NULL;
2904 	}
2905 
2906 	SK_DF(SK_VERB_FSW_DP | SK_VERB_TX,
2907 	    "%s(%d) continuation frag, id %u",
2908 	    sk_proc_name_address(current_proc()),
2909 	    sk_proc_pid(current_proc()),
2910 	    pkt->pkt_flow_ip_frag_id);
2911 	if (__improbable(prev_fe == NULL ||
2912 	    !prev_fe->fe_tx_is_cont_frag)) {
2913 		SK_ERR("%s(%d) unexpected continuation frag, id %u",
2914 		    sk_proc_name_address(current_proc()),
2915 		    sk_proc_pid(current_proc()),
2916 		    pkt->pkt_flow_ip_frag_id);
2917 		FSW_STATS_INC(FSW_STATS_TX_FRAG_BAD_CONT);
2918 		return NULL;
2919 	}
2920 	if (__improbable(pkt->pkt_flow_ip_frag_id !=
2921 	    prev_fe->fe_tx_frag_id)) {
2922 		FSW_STATS_INC(FSW_STATS_TX_FRAG_BAD_CONT);
2923 		SK_ERR("%s(%d) wrong continuation frag id %u expecting %u",
2924 		    sk_proc_name_address(current_proc()),
2925 		    sk_proc_pid(current_proc()),
2926 		    pkt->pkt_flow_ip_frag_id,
2927 		    prev_fe->fe_tx_frag_id);
2928 		return NULL;
2929 	}
2930 
2931 	return prev_fe;
2932 }
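
/*
 * Non-first IP fragments carry no transport header, so they cannot be looked
 * up by 5-tuple.  Instead they must immediately follow their first fragment
 * and are matched to the previous packet's flow entry by IP fragment id; the
 * checks above amount to:
 *
 *	if (prev_fe != NULL && prev_fe->fe_tx_is_cont_frag &&
 *	    pkt->pkt_flow_ip_frag_id == prev_fe->fe_tx_frag_id)
 *		return prev_fe;		// continuation of the same datagram
 *	else
 *		return NULL;		// orphan/out-of-order fragment, dropped
 */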
2933 
2934 static struct flow_entry *
2935 tx_lookup_flow(struct nx_flowswitch *fsw, struct __kern_packet *pkt,
2936     struct flow_entry *prev_fe)
2937 {
2938 	struct flow_entry *fe;
2939 
2940 	fe = lookup_flow_with_pkt(fsw, pkt, false, prev_fe);
2941 	if (__improbable(fe == NULL)) {
2942 		goto done;
2943 	}
2944 
2945 	if (__improbable(fe->fe_flags & FLOWENTF_TORN_DOWN)) {
2946 		SK_RDERR(5, "Tx flow torn down");
2947 		FSW_STATS_INC(FSW_STATS_TX_FLOW_TORNDOWN);
2948 		flow_entry_release(&fe);
2949 		goto done;
2950 	}
2951 
2952 	_FSW_INJECT_ERROR(34, pkt->pkt_flow_id[0], fe->fe_uuid[0] + 1,
2953 	    null_func);
2954 
2955 	if (__improbable(!_UUID_MATCH(pkt->pkt_flow_id, fe->fe_uuid))) {
2956 		uuid_string_t flow_id_str, pkt_id_str;
2957 		sk_uuid_unparse(fe->fe_uuid, flow_id_str);
2958 		sk_uuid_unparse(pkt->pkt_flow_id, pkt_id_str);
2959 		SK_ERR("pkt flow id %s != flow id %s", pkt_id_str, flow_id_str);
2960 		flow_entry_release(&fe);
2961 		FSW_STATS_INC(FSW_STATS_TX_FLOW_BAD_ID);
2962 	}
2963 
2964 done:
2965 	return fe;
2966 }
2967 
2968 static inline void
2969 tx_flow_process(struct nx_flowswitch *fsw, struct flow_entry *fe)
2970 {
2971 	ASSERT(!KPKTQ_EMPTY(&fe->fe_tx_pktq));
2972 	ASSERT(KPKTQ_LEN(&fe->fe_tx_pktq) != 0);
2973 
2974 	SK_DF(SK_VERB_FSW_DP | SK_VERB_TX, "TX %d pkts from fe %p port %d",
2975 	    KPKTQ_LEN(&fe->fe_tx_pktq), fe, fe->fe_nx_port);
2976 
2977 	/* flow related processing (default, agg, etc.) */
2978 	fe->fe_tx_process(fsw, fe);
2979 
2980 	KPKTQ_FINI(&fe->fe_tx_pktq);
2981 }
2982 
2983 #if SK_LOG
2984 static void
2985 dp_tx_log_pkt(uint64_t verb, char *desc, struct __kern_packet *pkt)
2986 {
2987 	char *pkt_buf;
2988 	MD_BUFLET_ADDR_ABS(pkt, pkt_buf);
2989 	SK_DF(verb, "%s(%d) %s %s", sk_proc_name_address(current_proc()),
2990 	    sk_proc_pid(current_proc()), desc, sk_dump("buf", pkt_buf,
2991 	    pkt->pkt_length, 128, NULL, 0));
2992 }
2993 #else /* !SK_LOG */
2994 #define dp_tx_log_pkt(...)
2995 #endif /* !SK_LOG */
2996 
2997 static void
2998 dp_tx_pktq(struct nx_flowswitch *fsw, struct pktq *spktq)
2999 {
3000 	struct __kern_packet *spkt, *pkt;
3001 	struct flow_entry_list fes = TAILQ_HEAD_INITIALIZER(fes);
3002 	struct flow_entry *fe, *prev_fe;
3003 	struct pktq dropped_pkts, dpktq;
3004 	struct nexus_adapter *dev_na;
3005 	struct kern_pbufpool *dev_pp;
3006 	struct ifnet *ifp;
3007 	sa_family_t af;
3008 	uint32_t n_pkts, n_flows = 0;
3009 
3010 	int err;
3011 	KPKTQ_INIT(&dpktq);
3012 	KPKTQ_INIT(&dropped_pkts);
3013 	n_pkts = KPKTQ_LEN(spktq);
3014 
3015 	FSW_RLOCK(fsw);
3016 	if (__improbable(FSW_QUIESCED(fsw))) {
3017 		DTRACE_SKYWALK1(tx__quiesced, struct nx_flowswitch *, fsw);
3018 		SK_ERR("flowswitch detached, dropping %d pkts", n_pkts);
3019 		KPKTQ_CONCAT(&dropped_pkts, spktq);
3020 		goto done;
3021 	}
3022 	dev_na = fsw->fsw_dev_ch->ch_na;
3023 	if (__improbable(dev_na == NULL)) {
3024 		SK_ERR("dev port not attached, dropping %d pkts", n_pkts);
3025 		FSW_STATS_ADD(FSW_STATS_DST_NXPORT_INACTIVE, n_pkts);
3026 		KPKTQ_CONCAT(&dropped_pkts, spktq);
3027 		goto done;
3028 	}
3029 	/*
3030 	 * fsw_ifp should still be valid at this point. If fsw is detached
3031 	 * after fsw_lock is released, this ifp will remain valid and
3032 	 * netif_transmit() will behave properly even if the ifp is in
3033 	 * detached state.
3034 	 */
3035 	ifp = fsw->fsw_ifp;
3036 
3037 	/* batch allocate enough packets */
3038 	dev_pp = na_kr_get_pp(dev_na, NR_TX);
3039 
3040 	err = pp_alloc_pktq(dev_pp, dev_pp->pp_max_frags, &dpktq, n_pkts, NULL,
3041 	    NULL, SKMEM_NOSLEEP);
3042 #if DEVELOPMENT || DEBUG
3043 	if (__probable(err != ENOMEM)) {
3044 		_FSW_INJECT_ERROR(12, err, ENOMEM, pp_free_pktq, &dpktq);
3045 	}
3046 #endif /* DEVELOPMENT || DEBUG */
3047 	if (__improbable(err == ENOMEM)) {
3048 		ASSERT(KPKTQ_EMPTY(&dpktq));
3049 		KPKTQ_CONCAT(&dropped_pkts, spktq);
3050 		FSW_STATS_ADD(FSW_STATS_DROP_NOMEM_PKT, n_pkts);
3051 		SK_ERR("failed to alloc %u pkts from device pool", n_pkts);
3052 		goto done;
3053 	} else if (__improbable(err == EAGAIN)) {
3054 		FSW_STATS_ADD(FSW_STATS_DROP_NOMEM_PKT,
3055 		    (n_pkts - KPKTQ_LEN(&dpktq)));
3056 		FSW_STATS_ADD(FSW_STATS_DROP,
3057 		    (n_pkts - KPKTQ_LEN(&dpktq)));
3058 	}
3059 
3060 	n_pkts = KPKTQ_LEN(&dpktq);
3061 	prev_fe = NULL;
3062 	KPKTQ_FOREACH(spkt, spktq) {
3063 		if (n_pkts == 0) {
3064 			break;
3065 		}
3066 		--n_pkts;
3067 
3068 		KPKTQ_DEQUEUE(&dpktq, pkt);
3069 		ASSERT(pkt != NULL);
3070 		err = dp_copy_to_dev(fsw, spkt, pkt);
3071 		if (__improbable(err != 0)) {
3072 			KPKTQ_ENQUEUE(&dropped_pkts, pkt);
3073 			continue;
3074 		}
3075 
3076 		af = fsw_ip_demux(fsw, pkt);
3077 		if (__improbable(af == AF_UNSPEC)) {
3078 			dp_tx_log_pkt(SK_VERB_ERROR, "demux err", pkt);
3079 			FSW_STATS_INC(FSW_STATS_TX_DEMUX_ERR);
3080 			KPKTQ_ENQUEUE(&dropped_pkts, pkt);
3081 			continue;
3082 		}
3083 
3084 		err = flow_pkt_classify(pkt, ifp, af, false);
3085 		if (__improbable(err != 0)) {
3086 			dp_tx_log_pkt(SK_VERB_ERROR, "flow extract err", pkt);
3087 			FSW_STATS_INC(FSW_STATS_TX_FLOW_EXTRACT_ERR);
3088 			KPKTQ_ENQUEUE(&dropped_pkts, pkt);
3089 			continue;
3090 		}
3091 
3092 		if (__improbable(pkt->pkt_flow_ip_is_frag &&
3093 		    !pkt->pkt_flow_ip_is_first_frag)) {
3094 			fe = tx_process_continuous_ip_frag(fsw, prev_fe, pkt);
3095 			if (__probable(fe != NULL)) {
3096 				flow_entry_retain(fe);
3097 				goto flow_batch;
3098 			} else {
3099 				FSW_STATS_INC(FSW_STATS_TX_FRAG_BAD_CONT);
3100 				KPKTQ_ENQUEUE(&dropped_pkts, pkt);
3101 				continue;
3102 			}
3103 		}
3104 
3105 		fe = tx_lookup_flow(fsw, pkt, prev_fe);
3106 		if (__improbable(fe == NULL)) {
3107 			FSW_STATS_INC(FSW_STATS_TX_FLOW_NOT_FOUND);
3108 			KPKTQ_ENQUEUE(&dropped_pkts, pkt);
3109 			prev_fe = NULL;
3110 			continue;
3111 		}
3112 flow_batch:
3113 		tx_flow_batch_packet(&fes, fe, pkt);
3114 		prev_fe = fe;
3115 	}
3116 
3117 	struct flow_entry *tfe = NULL;
3118 	TAILQ_FOREACH_SAFE(fe, &fes, fe_tx_link, tfe) {
3119 		tx_flow_process(fsw, fe);
3120 		TAILQ_REMOVE(&fes, fe, fe_tx_link);
3121 		fe->fe_tx_is_cont_frag = false;
3122 		fe->fe_tx_frag_id = 0;
3123 		flow_entry_release(&fe);
3124 		n_flows++;
3125 	}
3126 
3127 done:
3128 	FSW_RUNLOCK(fsw);
3129 	if (n_flows > 0) {
3130 		netif_transmit(ifp, NETIF_XMIT_FLAG_CHANNEL);
3131 	}
3132 	dp_drop_pktq(fsw, &dropped_pkts);
3133 	KPKTQ_FINI(&dropped_pkts);
3134 	KPKTQ_FINI(&dpktq);
3135 }
3136 
3137 static inline void
3138 fsw_dev_ring_flush(struct nx_flowswitch *fsw, struct __kern_channel_ring *r,
3139     struct proc *p)
3140 {
3141 #pragma unused(p)
3142 	uint32_t total_pkts = 0, total_bytes = 0;
3143 
3144 	for (;;) {
3145 		struct pktq pktq;
3146 		KPKTQ_INIT(&pktq);
3147 		uint32_t n_bytes;
3148 		fsw_ring_dequeue_pktq(fsw, r, fsw_rx_batch, &pktq, &n_bytes);
3149 		if (n_bytes == 0) {
3150 			break;
3151 		}
3152 		total_pkts += KPKTQ_LEN(&pktq);
3153 		total_bytes += n_bytes;
3154 
3155 		if (__probable(fsw->fsw_ifp->if_input_netem == NULL)) {
3156 			fsw_receive(fsw, &pktq);
3157 		} else {
3158 			fsw_dev_input_netem_enqueue(fsw, &pktq);
3159 		}
3160 		KPKTQ_FINI(&pktq);
3161 	}
3162 
3163 	KDBG(SK_KTRACE_FSW_DEV_RING_FLUSH, SK_KVA(r), total_pkts, total_bytes);
3164 	DTRACE_SKYWALK2(fsw__dp__dev__ring__flush, uint32_t, total_pkts,
3165 	    uint32_t, total_bytes);
3166 
3167 	/* compute mitigation rate for delivered traffic */
3168 	if (__probable(r->ckr_netif_mit_stats != NULL)) {
3169 		r->ckr_netif_mit_stats(r, total_pkts, total_bytes);
3170 	}
3171 }
3172 
3173 static inline void
3174 fsw_user_ring_flush(struct nx_flowswitch *fsw, struct __kern_channel_ring *r,
3175     struct proc *p)
3176 {
3177 #pragma unused(p)
3178 	static packet_trace_id_t trace_id = 0;
3179 	uint32_t total_pkts = 0, total_bytes = 0;
3180 
3181 	for (;;) {
3182 		struct pktq pktq;
3183 		KPKTQ_INIT(&pktq);
3184 		uint32_t n_bytes;
3185 		fsw_ring_dequeue_pktq(fsw, r, fsw_tx_batch, &pktq, &n_bytes);
3186 		if (n_bytes == 0) {
3187 			break;
3188 		}
3189 		total_pkts += KPKTQ_LEN(&pktq);
3190 		total_bytes += n_bytes;
3191 
3192 		KPKTQ_FIRST(&pktq)->pkt_trace_id = ++trace_id;
3193 		KDBG(SK_KTRACE_PKT_TX_FSW | DBG_FUNC_START, KPKTQ_FIRST(&pktq)->pkt_trace_id);
3194 
3195 		dp_tx_pktq(fsw, &pktq);
3196 		dp_free_pktq(fsw, &pktq);
3197 		KPKTQ_FINI(&pktq);
3198 	}
3199 
3200 	kr_update_stats(r, total_pkts, total_bytes);
3201 
3202 	KDBG(SK_KTRACE_FSW_USER_RING_FLUSH, SK_KVA(r), total_pkts, total_bytes);
3203 	DTRACE_SKYWALK2(fsw__dp__user__ring__flush, uint32_t, total_pkts,
3204 	    uint32_t, total_bytes);
3205 }
3206 
3207 void
3208 fsw_ring_flush(struct nx_flowswitch *fsw, struct __kern_channel_ring *r,
3209     struct proc *p)
3210 {
3211 	struct nexus_vp_adapter *vpna = VPNA(KRNA(r));
3212 
3213 	ASSERT(sk_is_sync_protected());
3214 	ASSERT(vpna->vpna_nx_port != FSW_VP_HOST);
3215 	ASSERT(vpna->vpna_up.na_md_type == NEXUS_META_TYPE_PACKET);
3216 
3217 	if (vpna->vpna_nx_port == FSW_VP_DEV) {
3218 		fsw_dev_ring_flush(fsw, r, p);
3219 	} else {
3220 		fsw_user_ring_flush(fsw, r, p);
3221 	}
3222 }
3223 
3224 int
3225 fsw_dp_ctor(struct nx_flowswitch *fsw)
3226 {
3227 	uint32_t fe_cnt = fsw_fe_table_size;
3228 	uint32_t fob_cnt = fsw_flow_owner_buckets;
3229 	uint32_t frb_cnt = fsw_flow_route_buckets;
3230 	uint32_t frib_cnt = fsw_flow_route_id_buckets;
3231 	struct kern_nexus *nx = fsw->fsw_nx;
3232 	char name[64];
3233 	int error = 0;
3234 
3235 	/* just in case */
3236 	if (fe_cnt == 0) {
3237 		fe_cnt = NX_FSW_FE_TABLESZ;
3238 		ASSERT(fe_cnt != 0);
3239 	}
3240 	if (fob_cnt == 0) {
3241 		fob_cnt = NX_FSW_FOB_HASHSZ;
3242 		ASSERT(fob_cnt != 0);
3243 	}
3244 	if (frb_cnt == 0) {
3245 		frb_cnt = NX_FSW_FRB_HASHSZ;
3246 		ASSERT(frb_cnt != 0);
3247 	}
3248 	if (frib_cnt == 0) {
3249 		frib_cnt = NX_FSW_FRIB_HASHSZ;
3250 		ASSERT(frib_cnt != 0);
3251 	}
3252 
3253 	/* make sure fe_cnt is a power of two, else round up */
3254 	if ((fe_cnt & (fe_cnt - 1)) != 0) {
3255 		fe_cnt--;
3256 		fe_cnt |= (fe_cnt >> 1);
3257 		fe_cnt |= (fe_cnt >> 2);
3258 		fe_cnt |= (fe_cnt >> 4);
3259 		fe_cnt |= (fe_cnt >> 8);
3260 		fe_cnt |= (fe_cnt >> 16);
3261 		fe_cnt++;
3262 	}
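
	/*
	 * The bit-smearing above rounds fe_cnt up to the next power of two so
	 * the flow table size stays mask-friendly: after the decrement, each
	 * shift-or propagates the highest set bit into every lower position,
	 * and the final increment carries into the next power of two.  For
	 * example, with fe_cnt = 5000:
	 *
	 *	4999 = 0x1387  ->  or-shifts  ->  0x1FFF (8191)  ->  +1  ->  8192 (2^13)
	 *
	 * The same rounding is applied to frb_cnt below.
	 */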
3263 
3264 	/* make sure frb_cnt is a power of two, else round up */
3265 	if ((frb_cnt & (frb_cnt - 1)) != 0) {
3266 		frb_cnt--;
3267 		frb_cnt |= (frb_cnt >> 1);
3268 		frb_cnt |= (frb_cnt >> 2);
3269 		frb_cnt |= (frb_cnt >> 4);
3270 		frb_cnt |= (frb_cnt >> 8);
3271 		frb_cnt |= (frb_cnt >> 16);
3272 		frb_cnt++;
3273 	}
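	/*
	 * The bit-smearing above is the usual branch-free round-up to the
	 * next power of two for a nonzero 32-bit value (e.g. 1500 becomes
	 * 2048): subtract one, propagate the highest set bit into every
	 * lower position, then add one back.  As a standalone sketch (the
	 * helper name is hypothetical; nothing here uses it):
	 *
	 *	static uint32_t
	 *	roundup_pow2_u32(uint32_t v)
	 *	{
	 *		v--;
	 *		v |= (v >> 1);
	 *		v |= (v >> 2);
	 *		v |= (v >> 4);
	 *		v |= (v >> 8);
	 *		v |= (v >> 16);
	 *		return v + 1;
	 *	}
	 */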
3274 
3275 	lck_mtx_init(&fsw->fsw_detach_barrier_lock, &nexus_lock_group,
3276 	    &nexus_lock_attr);
3277 	lck_mtx_init(&fsw->fsw_reap_lock, &nexus_lock_group, &nexus_lock_attr);
3278 	lck_mtx_init(&fsw->fsw_linger_lock, &nexus_lock_group, &nexus_lock_attr);
3279 	TAILQ_INIT(&fsw->fsw_linger_head);
3280 
3281 	(void) snprintf(name, sizeof(name), "%s_%llu", NX_FSW_NAME, nx->nx_id);
3282 	error = nx_advisory_alloc(nx, name,
3283 	    &NX_PROV(nx)->nxprov_region_params[SKMEM_REGION_NEXUSADV],
3284 	    NEXUS_ADVISORY_TYPE_FLOWSWITCH);
3285 	if (error != 0) {
3286 		fsw_dp_dtor(fsw);
3287 		return error;
3288 	}
3289 
3290 	fsw->fsw_flow_mgr = flow_mgr_create(fe_cnt, fob_cnt, frb_cnt, frib_cnt);
3291 	if (fsw->fsw_flow_mgr == NULL) {
3292 		fsw_dp_dtor(fsw);
3293 		return ENOMEM;
3294 	}
3295 
3296 	/* generic name; will be customized upon ifattach */
3297 	(void) snprintf(fsw->fsw_reap_name, sizeof(fsw->fsw_reap_name),
3298 	    FSW_REAP_THREADNAME, name, "");
3299 
3300 	if (kernel_thread_start(fsw_reap_thread_func, fsw,
3301 	    &fsw->fsw_reap_thread) != KERN_SUCCESS) {
3302 		panic_plain("%s: can't create thread", __func__);
3303 		/* NOTREACHED */
3304 		__builtin_unreachable();
3305 	}
3306 	/* this must not fail */
3307 	VERIFY(fsw->fsw_reap_thread != NULL);
3308 
3309 	SK_DF(SK_VERB_MEM, "fsw 0x%llx ALLOC", SK_KVA(fsw));
3310 
3311 
3312 	return error;
3313 }
3314 
3315 void
3316 fsw_dp_dtor(struct nx_flowswitch *fsw)
3317 {
3318 	uint64_t f = (1 * NSEC_PER_MSEC);         /* 1 ms */
3319 	uint64_t s = (1000 * NSEC_PER_MSEC);        /* 1 sec */
3320 	uint32_t i = 0;
3321 
3322 #if (DEVELOPMENT || DEBUG)
3323 	if (fsw->fsw_rps_threads != NULL) {
3324 		for (i = 0; i < fsw->fsw_rps_nthreads; i++) {
3325 			fsw_rps_thread_join(fsw, i);
3326 		}
3327 		kfree_type(struct fsw_rps_thread, fsw->fsw_rps_threads);
3328 	}
3329 #endif /* !DEVELOPMENT && !DEBUG */
3330 
3331 	nx_advisory_free(fsw->fsw_nx);
3332 
3333 	if (fsw->fsw_reap_thread != THREAD_NULL) {
3334 		/* signal thread to begin self-termination */
3335 		lck_mtx_lock(&fsw->fsw_reap_lock);
3336 		fsw->fsw_reap_flags |= FSW_REAPF_TERMINATING;
3337 
3338 		/*
3339 		 * And wait for thread to terminate; use another
3340 		 * wait channel here other than fsw_reap_flags to
3341 		 * make it more explicit.  In the event the reaper
3342 		 * thread misses a wakeup, we'll try again once
3343 		 * every second (except for the first time).
3344 		 */
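		/*
		 * Each pass below arms an absolute deadline from a relative
		 * nanosecond interval before blocking; the reaper thread uses
		 * the same two-call idiom for its periodic sleep.  In sketch
		 * form (`interval_ns' and `chan' are placeholders):
		 *
		 *	uint64_t abs = 0;
		 *	nanoseconds_to_absolutetime(interval_ns, &abs);
		 *	clock_absolutetime_interval_to_deadline(abs, &abs);
		 *	(void) assert_wait_deadline(chan, THREAD_UNINT, abs);
		 *	thread_block(THREAD_CONTINUE_NULL);
		 */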
3345 		while (!(fsw->fsw_reap_flags & FSW_REAPF_TERMINATED)) {
3346 			uint64_t t = 0;
3347 
3348 			nanoseconds_to_absolutetime((i++ == 0) ? f : s, &t);
3349 			clock_absolutetime_interval_to_deadline(t, &t);
3350 			ASSERT(t != 0);
3351 
3352 			fsw->fsw_reap_flags |= FSW_REAPF_TERMINATEBLOCK;
3353 			if (!(fsw->fsw_reap_flags & FSW_REAPF_RUNNING)) {
3354 				thread_wakeup((caddr_t)&fsw->fsw_reap_flags);
3355 			}
3356 			(void) assert_wait_deadline(&fsw->fsw_reap_thread,
3357 			    THREAD_UNINT, t);
3358 			lck_mtx_unlock(&fsw->fsw_reap_lock);
3359 			thread_block(THREAD_CONTINUE_NULL);
3360 			lck_mtx_lock(&fsw->fsw_reap_lock);
3361 			fsw->fsw_reap_flags &= ~FSW_REAPF_TERMINATEBLOCK;
3362 		}
3363 		ASSERT(fsw->fsw_reap_flags & FSW_REAPF_TERMINATED);
3364 		lck_mtx_unlock(&fsw->fsw_reap_lock);
3365 		fsw->fsw_reap_thread = THREAD_NULL;
3366 	}
3367 
3368 	/* free any remaining flow entries in the linger list */
3369 	fsw_linger_purge(fsw);
3370 
3371 	if (fsw->fsw_flow_mgr != NULL) {
3372 		flow_mgr_destroy(fsw->fsw_flow_mgr);
3373 		fsw->fsw_flow_mgr = NULL;
3374 	}
3375 
3376 
3377 	lck_mtx_destroy(&fsw->fsw_detach_barrier_lock, &nexus_lock_group);
3378 	lck_mtx_destroy(&fsw->fsw_reap_lock, &nexus_lock_group);
3379 	lck_mtx_destroy(&fsw->fsw_linger_lock, &nexus_lock_group);
3380 }
3381 
3382 void
3383 fsw_linger_insert(struct flow_entry *fe)
3384 {
3385 	struct nx_flowswitch *fsw = fe->fe_fsw;
3386 	SK_LOG_VAR(char dbgbuf[FLOWENTRY_DBGBUF_SIZE]);
3387 	SK_DF(SK_VERB_FLOW, "entry \"%s\" fe 0x%llx flags 0x%b",
3388 	    fe_as_string(fe, dbgbuf, sizeof(dbgbuf)), SK_KVA(fe),
3389 	    fe->fe_flags, FLOWENTF_BITS);
3390 
3391 	net_update_uptime();
3392 
3393 	ASSERT(flow_entry_refcnt(fe) >= 1);
3394 	ASSERT(fe->fe_flags & FLOWENTF_TORN_DOWN);
3395 	ASSERT(fe->fe_flags & FLOWENTF_DESTROYED);
3396 	ASSERT(!(fe->fe_flags & FLOWENTF_LINGERING));
3397 	ASSERT(fe->fe_flags & FLOWENTF_WAIT_CLOSE);
3398 	ASSERT(fe->fe_linger_wait != 0);
3399 	fe->fe_linger_expire = (_net_uptime + fe->fe_linger_wait);
3400 	atomic_bitset_32(&fe->fe_flags, FLOWENTF_LINGERING);
3401 
3402 	lck_mtx_lock_spin(&fsw->fsw_linger_lock);
3403 	TAILQ_INSERT_TAIL(&fsw->fsw_linger_head, fe, fe_linger_link);
3404 	fsw->fsw_linger_cnt++;
3405 	VERIFY(fsw->fsw_linger_cnt != 0);
3406 	lck_mtx_unlock(&fsw->fsw_linger_lock);
3407 
3408 	fsw_reap_sched(fsw);
3409 }
3410 
3411 static void
3412 fsw_linger_remove_internal(struct flow_entry_linger_head *linger_head,
3413     struct flow_entry *fe)
3414 {
3415 	SK_LOG_VAR(char dbgbuf[FLOWENTRY_DBGBUF_SIZE]);
3416 	SK_DF(SK_VERB_FLOW, "entry \"%s\" fe 0x%llx flags 0x%b",
3417 	    fe_as_string(fe, dbgbuf, sizeof(dbgbuf)), SK_KVA(fe),
3418 	    fe->fe_flags, FLOWENTF_BITS);
3419 
3420 	ASSERT(fe->fe_flags & FLOWENTF_TORN_DOWN);
3421 	ASSERT(fe->fe_flags & FLOWENTF_DESTROYED);
3422 	ASSERT(fe->fe_flags & FLOWENTF_LINGERING);
3423 	atomic_bitclear_32(&fe->fe_flags, FLOWENTF_LINGERING);
3424 
3425 	TAILQ_REMOVE(linger_head, fe, fe_linger_link);
3426 	flow_entry_release(&fe);
3427 }
3428 
3429 static void
3430 fsw_linger_remove(struct flow_entry *fe)
3431 {
3432 	struct nx_flowswitch *fsw = fe->fe_fsw;
3433 
3434 	LCK_MTX_ASSERT(&fsw->fsw_linger_lock, LCK_MTX_ASSERT_OWNED);
3435 
3436 	fsw_linger_remove_internal(&fsw->fsw_linger_head, fe);
3437 	VERIFY(fsw->fsw_linger_cnt != 0);
3438 	fsw->fsw_linger_cnt--;
3439 }
3440 
3441 void
3442 fsw_linger_purge(struct nx_flowswitch *fsw)
3443 {
3444 	struct flow_entry *fe, *tfe;
3445 
3446 	lck_mtx_lock(&fsw->fsw_linger_lock);
3447 	TAILQ_FOREACH_SAFE(fe, &fsw->fsw_linger_head, fe_linger_link, tfe) {
3448 		fsw_linger_remove(fe);
3449 	}
3450 	ASSERT(fsw->fsw_linger_cnt == 0);
3451 	ASSERT(TAILQ_EMPTY(&fsw->fsw_linger_head));
3452 	lck_mtx_unlock(&fsw->fsw_linger_lock);
3453 }
3454 
3455 void
3456 fsw_reap_sched(struct nx_flowswitch *fsw)
3457 {
3458 	ASSERT(fsw->fsw_reap_thread != THREAD_NULL);
3459 	lck_mtx_lock_spin(&fsw->fsw_reap_lock);
3460 	if (!(fsw->fsw_reap_flags & FSW_REAPF_RUNNING) &&
3461 	    !(fsw->fsw_reap_flags & (FSW_REAPF_TERMINATING | FSW_REAPF_TERMINATED))) {
3462 		thread_wakeup((caddr_t)&fsw->fsw_reap_flags);
3463 	}
3464 	lck_mtx_unlock(&fsw->fsw_reap_lock);
3465 }
3466 
3467 __attribute__((noreturn))
3468 static void
3469 fsw_reap_thread_func(void *v, wait_result_t w)
3470 {
3471 #pragma unused(w)
3472 	struct nx_flowswitch *fsw = v;
3473 
3474 	ASSERT(fsw->fsw_reap_thread == current_thread());
3475 	thread_set_thread_name(current_thread(), fsw->fsw_reap_name);
3476 
3477 	net_update_uptime();
3478 
3479 	lck_mtx_lock(&fsw->fsw_reap_lock);
3480 	VERIFY(!(fsw->fsw_reap_flags & FSW_REAPF_RUNNING));
3481 	(void) assert_wait(&fsw->fsw_reap_flags, THREAD_UNINT);
3482 	lck_mtx_unlock(&fsw->fsw_reap_lock);
3483 	thread_block_parameter(fsw_reap_thread_cont, fsw);
3484 	/* NOTREACHED */
3485 	__builtin_unreachable();
3486 }
3487 
3488 __attribute__((noreturn))
3489 static void
3490 fsw_reap_thread_cont(void *v, wait_result_t wres)
3491 {
3492 	struct nx_flowswitch *fsw = v;
3493 	boolean_t low;
3494 	uint64_t t = 0;
3495 
3496 	SK_DF(SK_VERB_FLOW, "%s: running", fsw->fsw_reap_name);
3497 
3498 	lck_mtx_lock(&fsw->fsw_reap_lock);
3499 	if (__improbable(wres == THREAD_INTERRUPTED ||
3500 	    (fsw->fsw_reap_flags & FSW_REAPF_TERMINATING) != 0)) {
3501 		goto terminate;
3502 	}
3503 
3504 	ASSERT(!(fsw->fsw_reap_flags & FSW_REAPF_TERMINATED));
3505 	fsw->fsw_reap_flags |= FSW_REAPF_RUNNING;
3506 	lck_mtx_unlock(&fsw->fsw_reap_lock);
3507 
3508 	net_update_uptime();
3509 
3510 	/* prevent detach from happening while we're here */
3511 	if (!fsw_detach_barrier_add(fsw)) {
3512 		SK_ERR("%s: netagent detached", fsw->fsw_reap_name);
3513 		t = 0;
3514 	} else {
3515 		uint32_t fe_nonviable, fe_freed, fe_aborted;
3516 		uint32_t fr_freed, fr_resid = 0;
3517 		struct ifnet *ifp = fsw->fsw_ifp;
3518 		uint64_t i = FSW_REAP_IVAL;
3519 		uint64_t now = _net_uptime;
3520 		uint64_t last;
3521 
3522 		ASSERT(fsw->fsw_ifp != NULL);
3523 
3524 		/*
3525 		 * Pass 1: process any deferred {withdrawn,nonviable} requests.
3526 		 */
3527 		fe_nonviable = fsw_process_deferred(fsw);
3528 
3529 		/*
3530 		 * Pass 2: remove any expired lingering flows.
3531 		 */
3532 		fe_freed = fsw_process_linger(fsw, &fe_aborted);
3533 
3534 		/*
3535 		 * Pass 3: prune idle flow routes.
3536 		 */
3537 		fr_freed = flow_route_prune(fsw->fsw_flow_mgr,
3538 		    ifp, &fr_resid);
3539 
3540 		/*
3541 		 * Pass 4: prune the flow table (try to shrink the cuckoo
3542 		 * hashtable when possible).
3543 		 */
3544 		cuckoo_hashtable_try_shrink(fsw->fsw_flow_mgr->fm_flow_table);
3545 
3546 		SK_DF(SK_VERB_FLOW, "%s: fe_nonviable %u/%u fe_freed %u/%u "
3547 		    "fe_aborted %u fr_freed %u/%u",
3548 		    fsw->fsw_flow_mgr->fm_name, fe_nonviable,
3549 		    (fe_nonviable + fsw->fsw_pending_nonviable),
3550 		    fe_freed, fsw->fsw_linger_cnt, fe_aborted, fr_freed,
3551 		    (fr_freed + fr_resid));
3552 
3553 		/* see if VM memory level is critical */
3554 		low = skmem_lowmem_check();
3555 
3556 		/*
3557 		 * If things appear to be idle, we can prune away cached
3558 		 * objects that have fallen out of the working sets (this
3559 		 * is different than purging).  Every once in a while, we
3560 		 * also purge the caches.  Note that this is done across
3561 		 * all flowswitch instances, and so we limit this to no
3562 		 * more than once every FSW_REAP_SK_THRES seconds.
3563 		 */
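		/*
		 * The check below is a lockless rate limiter: read the shared
		 * timestamp, and only the caller that wins the compare-and-swap
		 * from the old value to `now' actually reaps.  A minimal sketch
		 * of the pattern, with `shared_last' standing in for a global
		 * like fsw_reap_last:
		 *
		 *	uint64_t prev;
		 *
		 *	atomic_get_64(prev, &shared_last);
		 *	if ((now - prev) >= threshold &&
		 *	    atomic_test_set_64(&shared_last, prev, now)) {
		 *		do_reap();
		 *	}
		 *
		 * At most one caller per threshold window reaches do_reap().
		 */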
3564 		atomic_get_64(last, &fsw_reap_last);
3565 		if ((low || (last != 0 && (now - last) >= FSW_REAP_SK_THRES)) &&
3566 		    atomic_test_set_64(&fsw_reap_last, last, now)) {
3567 			fsw_purge_cache(fsw, low);
3568 
3569 			/* increase sleep interval if idle */
3570 			if (kdebug_enable == 0 && fsw->fsw_linger_cnt == 0 &&
3571 			    fsw->fsw_pending_nonviable == 0 && fr_resid == 0) {
3572 				i <<= 3;
3573 			}
3574 		} else if (last == 0) {
3575 			atomic_set_64(&fsw_reap_last, now);
3576 		}
3577 
3578 		/*
3579 		 * Additionally, run thru the list of channels and prune
3580 		 * or purge away cached objects on "idle" channels.  This
3581 		 * check is rate limited to no more than once every
3582 		 * FSW_DRAIN_CH_THRES seconds.
3583 		 */
3584 		last = fsw->fsw_drain_channel_chk_last;
3585 		if (low || (last != 0 && (now - last) >= FSW_DRAIN_CH_THRES)) {
3586 			SK_DF(SK_VERB_FLOW, "%s: pruning channels",
3587 			    fsw->fsw_flow_mgr->fm_name);
3588 
3589 			fsw->fsw_drain_channel_chk_last = now;
3590 			fsw_drain_channels(fsw, now, low);
3591 		} else if (__improbable(last == 0)) {
3592 			fsw->fsw_drain_channel_chk_last = now;
3593 		}
3594 
3595 		/*
3596 		 * Finally, invoke the interface's reap callback to
3597 		 * tell it to prune or purge away cached objects if
3598 		 * it is idle.  This check is rate limited to no more
3599 		 * than once every FSW_REAP_IF_THRES seconds.
3600 		 */
3601 		last = fsw->fsw_drain_netif_chk_last;
3602 		if (low || (last != 0 && (now - last) >= FSW_REAP_IF_THRES)) {
3603 			ASSERT(fsw->fsw_nifna != NULL);
3604 
3605 			if (ifp->if_na_ops != NULL &&
3606 			    ifp->if_na_ops->ni_reap != NULL) {
3607 				SK_DF(SK_VERB_FLOW, "%s: pruning netif",
3608 				    fsw->fsw_flow_mgr->fm_name);
3609 				ifp->if_na_ops->ni_reap(ifp->if_na, ifp,
3610 				    FSW_REAP_IF_THRES, low);
3611 			}
3612 
3613 			fsw->fsw_drain_netif_chk_last = now;
3614 		} else if (__improbable(last == 0)) {
3615 			fsw->fsw_drain_netif_chk_last = now;
3616 		}
3617 
3618 		/* emit periodic interface stats ktrace */
3619 		last = fsw->fsw_reap_last;
3620 		if (last != 0 && (now - last) >= FSW_IFSTATS_THRES) {
3621 			KDBG(SK_KTRACE_AON_IF_STATS, ifp->if_data.ifi_ipackets,
3622 			    ifp->if_data.ifi_ibytes * 8,
3623 			    ifp->if_data.ifi_opackets,
3624 			    ifp->if_data.ifi_obytes * 8);
3625 
3626 			fsw->fsw_reap_last = now;
3627 		} else if (__improbable(last == 0)) {
3628 			fsw->fsw_reap_last = now;
3629 		}
3630 
3631 		nanoseconds_to_absolutetime(i * NSEC_PER_SEC, &t);
3632 		clock_absolutetime_interval_to_deadline(t, &t);
3633 		ASSERT(t != 0);
3634 
3635 		/* allow any pending detach to proceed */
3636 		fsw_detach_barrier_remove(fsw);
3637 	}
3638 
3639 	lck_mtx_lock(&fsw->fsw_reap_lock);
3640 	if (!(fsw->fsw_reap_flags & FSW_REAPF_TERMINATING)) {
3641 		fsw->fsw_reap_flags &= ~FSW_REAPF_RUNNING;
3642 		(void) assert_wait_deadline(&fsw->fsw_reap_flags,
3643 		    THREAD_UNINT, t);
3644 		lck_mtx_unlock(&fsw->fsw_reap_lock);
3645 		thread_block_parameter(fsw_reap_thread_cont, fsw);
3646 		/* NOTREACHED */
3647 		__builtin_unreachable();
3648 	} else {
3649 terminate:
3650 		LCK_MTX_ASSERT(&fsw->fsw_reap_lock, LCK_MTX_ASSERT_OWNED);
3651 		fsw->fsw_reap_flags &= ~(FSW_REAPF_RUNNING | FSW_REAPF_TERMINATING);
3652 		fsw->fsw_reap_flags |= FSW_REAPF_TERMINATED;
3653 		/*
3654 		 * And signal any thread waiting for us to terminate; use
3655 		 * another wait channel here other than fsw_reap_flags to
3656 		 * make it more explicit.
3657 		 */
3658 		if (fsw->fsw_reap_flags & FSW_REAPF_TERMINATEBLOCK) {
3659 			thread_wakeup((caddr_t)&fsw->fsw_reap_thread);
3660 		}
3661 		lck_mtx_unlock(&fsw->fsw_reap_lock);
3662 
3663 		SK_DF(SK_VERB_FLOW, "%s: terminating", fsw->fsw_reap_name);
3664 
3665 		/* for the extra refcnt from kernel_thread_start() */
3666 		thread_deallocate(current_thread());
3667 		/* this is the end */
3668 		thread_terminate(current_thread());
3669 		/* NOTREACHED */
3670 		__builtin_unreachable();
3671 	}
3672 
3673 	/* must never get here */
3674 	VERIFY(0);
3675 	/* NOTREACHED */
3676 	__builtin_unreachable();
3677 }
3678 
3679 static void
3680 fsw_drain_channels(struct nx_flowswitch *fsw, uint64_t now, boolean_t low)
3681 {
3682 	struct kern_nexus *nx = fsw->fsw_nx;
3683 
3684 	/* flowswitch protects NA via fsw_lock, see fsw_port_alloc/free */
3685 	FSW_RLOCK(fsw);
3686 
3687 	/* uncrustify doesn't handle C blocks properly */
3688 	/* BEGIN IGNORE CODESTYLE */
3689 	nx_port_foreach(nx, ^(nexus_port_t p) {
3690 		struct nexus_adapter *na = nx_port_get_na(nx, p);
3691 		if (na == NULL || na->na_work_ts == 0 ||
3692 		    (now - na->na_work_ts) < FSW_DRAIN_CH_THRES) {
3693 			return;
3694 		}
3695 
3696 		/*
3697 		 * If NA has been inactive for some time (twice the drain
3698 		 * threshold), we clear the work timestamp to temporarily skip
3699 		 * this channel until it's active again.  Purging cached objects
3700 		 * can be expensive since we'd need to allocate and construct
3701 		 * them again, so we do it only when necessary.
3702 		 */
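		/*
		 * In other words, the decision below is a two-level timeout:
		 * idle for at least FSW_DRAIN_CH_THRES means prune; idle for at
		 * least twice that (or a low-memory condition) means purge and
		 * clear na_work_ts, so the port is skipped until it does work
		 * again.
		 */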
3703 		boolean_t purge;
3704 		if (low || ((now - na->na_work_ts) >= (FSW_DRAIN_CH_THRES << 1))) {
3705 			na->na_work_ts = 0;
3706 			purge = TRUE;
3707 		} else {
3708 			purge = FALSE;
3709 		}
3710 
3711 		na_drain(na, purge);  /* purge/prune caches */
3712 	});
3713 	/* END IGNORE CODESTYLE */
3714 
3715 	FSW_RUNLOCK(fsw);
3716 }
3717 
3718 static void
3719 fsw_purge_cache(struct nx_flowswitch *fsw, boolean_t low)
3720 {
3721 #pragma unused(fsw)
3722 	uint64_t o = atomic_add_64_ov(&fsw_want_purge, 1);
3723 	uint32_t p = fsw_flow_purge_thresh;
3724 	boolean_t purge = (low || (o != 0 && p != 0 && (o % p) == 0));
3725 
3726 	SK_DF(SK_VERB_FLOW, "%s: %s caches",
3727 	    fsw->fsw_flow_mgr->fm_name,
3728 	    (purge ? "purge" : "prune"));
3729 
3730 	skmem_cache_reap_now(sk_fo_cache, purge);
3731 	skmem_cache_reap_now(sk_fe_cache, purge);
3732 	skmem_cache_reap_now(sk_fab_cache, purge);
3733 	skmem_cache_reap_now(flow_route_cache, purge);
3734 	skmem_cache_reap_now(flow_stats_cache, purge);
3735 	eventhandler_reap_caches(purge);
3736 	netns_reap_caches(purge);
3737 	skmem_reap_caches(purge);
3738 
3739 	if (if_is_fsw_transport_netagent_enabled() && purge) {
3740 		mbuf_drain(FALSE);
3741 	}
3742 }
3743 
3744 static void
3745 fsw_flow_handle_low_power(struct nx_flowswitch *fsw, struct flow_entry *fe)
3746 {
3747 	/* When the interface is in low power mode, the flow is nonviable */
3748 	if (!(fe->fe_flags & FLOWENTF_NONVIABLE) &&
3749 	    atomic_test_set_32(&fe->fe_want_nonviable, 0, 1)) {
3750 		atomic_add_32(&fsw->fsw_pending_nonviable, 1);
3751 	}
3752 }
3753 
3754 static uint32_t
3755 fsw_process_deferred(struct nx_flowswitch *fsw)
3756 {
3757 	struct flow_entry_dead sfed __sk_aligned(8);
3758 	struct flow_mgr *fm = fsw->fsw_flow_mgr;
3759 	struct flow_entry_dead *fed, *tfed;
3760 	LIST_HEAD(, flow_entry_dead) fed_head =
3761 	    LIST_HEAD_INITIALIZER(fed_head);
3762 	uint32_t i, nonviable = 0;
3763 	boolean_t lowpowermode = FALSE;
3764 
3765 	bzero(&sfed, sizeof(sfed));
3766 
3767 	/*
3768 	 * The flows become nonviable when the interface
3769 	 * is in low power mode (edge trigger)
3770 	 */
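	/*
	 * "Edge trigger" means we react only to a transition into low power
	 * mode, not on every pass while the interface stays in it; the
	 * generation count below acts as the edge detector.  In sketch form
	 * (the local names are placeholders):
	 *
	 *	if (in_low_power && seen_gencnt != cur_gencnt) {
	 *		seen_gencnt = cur_gencnt;
	 *		mark_flows_nonviable();
	 *	}
	 *
	 * so the nonviable marking runs once per transition.
	 */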
3771 	if ((fsw->fsw_ifp->if_xflags & IFXF_LOW_POWER) &&
3772 	    fsw->fsw_ifp->if_low_power_gencnt != fsw->fsw_low_power_gencnt) {
3773 		lowpowermode = TRUE;
3774 		fsw->fsw_low_power_gencnt = fsw->fsw_ifp->if_low_power_gencnt;
3775 	}
3776 
3777 	/*
3778 	 * Scan thru the flow entry tree, and commit any pending withdraw or
3779 	 * nonviable requests.  We may need to push stats and/or unassign the
3780 	 * nexus from NECP, but we cannot do that while holding the locks;
3781 	 * build a temporary list for those entries.
3782 	 */
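	/*
	 * The shape of the loop below is "record under the lock, act outside
	 * it": while a bucket lock is held we only copy the flow UUID and the
	 * two booleans into a flow_entry_dead and link it onto fed_head; the
	 * NECP/netagent calls happen later, after every bucket lock has been
	 * dropped.
	 */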
3783 	for (i = 0; i < fm->fm_owner_buckets_cnt; i++) {
3784 		struct flow_owner_bucket *fob = flow_mgr_get_fob_at_idx(fm, i);
3785 		struct flow_owner *fo;
3786 
3787 		/*
3788 		 * Grab the lock at all costs when handling low power mode
3789 		 */
3790 		if (__probable(!lowpowermode)) {
3791 			if (!FOB_TRY_LOCK(fob)) {
3792 				continue;
3793 			}
3794 		} else {
3795 			FOB_LOCK(fob);
3796 		}
3797 
3798 		FOB_LOCK_ASSERT_HELD(fob);
3799 		RB_FOREACH(fo, flow_owner_tree, &fob->fob_owner_head) {
3800 			struct flow_entry *fe;
3801 
3802 			RB_FOREACH(fe, flow_entry_id_tree,
3803 			    &fo->fo_flow_entry_id_head) {
3804 				/* try first as reader; skip if we can't */
3805 				if (__improbable(lowpowermode)) {
3806 					fsw_flow_handle_low_power(fsw, fe);
3807 				}
3808 				if (__improbable(fe->fe_flags & FLOWENTF_HALF_CLOSED)) {
3809 					atomic_bitclear_32(&fe->fe_flags, FLOWENTF_HALF_CLOSED);
3810 					flow_namespace_half_close(&fe->fe_port_reservation);
3811 				}
3812 
3813 				/* if not withdrawn/nonviable, skip */
3814 				if (!fe->fe_want_withdraw &&
3815 				    !fe->fe_want_nonviable) {
3816 					continue;
3817 				}
3818 				/*
3819 				 * Here we're holding the lock as writer;
3820 				 * don't spend too much time as we're
3821 				 * blocking the data path now.
3822 				 */
3823 				ASSERT(!uuid_is_null(fe->fe_uuid));
3824 				/* only need flow UUID and booleans */
3825 				uuid_copy(sfed.fed_uuid, fe->fe_uuid);
3826 				sfed.fed_want_clonotify =
3827 				    (fe->fe_flags & FLOWENTF_CLOSE_NOTIFY);
3828 				sfed.fed_want_nonviable = fe->fe_want_nonviable;
3829 				flow_entry_teardown(fo, fe);
3830 
3831 				/* do this outside the flow bucket lock */
3832 				fed = flow_entry_dead_alloc(Z_WAITOK);
3833 				ASSERT(fed != NULL);
3834 				*fed = sfed;
3835 				LIST_INSERT_HEAD(&fed_head, fed, fed_link);
3836 			}
3837 		}
3838 		FOB_UNLOCK(fob);
3839 	}
3840 
3841 	/*
3842 	 * These nonviable flows are no longer useful since we've lost
3843 	 * the source IP address; in the event the client monitors the
3844 	 * viability of the flow, explicitly mark it as nonviable so
3845 	 * that a new flow can be created.
3846 	 */
3847 	LIST_FOREACH_SAFE(fed, &fed_head, fed_link, tfed) {
3848 		LIST_REMOVE(fed, fed_link);
3849 		ASSERT(fsw->fsw_agent_session != NULL);
3850 
3851 		/* if flow is closed early */
3852 		if (fed->fed_want_clonotify) {
3853 			necp_client_early_close(fed->fed_uuid);
3854 		}
3855 
3856 		/* if nonviable, unassign nexus attributes */
3857 		if (fed->fed_want_nonviable) {
3858 			(void) netagent_assign_nexus(fsw->fsw_agent_session,
3859 			    fed->fed_uuid, NULL, 0);
3860 		}
3861 
3862 		flow_entry_dead_free(fed);
3863 		++nonviable;
3864 	}
3865 	ASSERT(LIST_EMPTY(&fed_head));
3866 
3867 	return nonviable;
3868 }
3869 
3870 static uint32_t
3871 fsw_process_linger(struct nx_flowswitch *fsw, uint32_t *abort)
3872 {
3873 	struct flow_entry_linger_head linger_head =
3874 	    TAILQ_HEAD_INITIALIZER(linger_head);
3875 	struct flow_entry *fe, *tfe;
3876 	uint64_t now = _net_uptime;
3877 	uint32_t i = 0, cnt = 0, freed = 0;
3878 
3879 	ASSERT(fsw->fsw_ifp != NULL);
3880 	ASSERT(abort != NULL);
3881 	*abort = 0;
3882 
3883 	/*
3884 	 * We don't want to contend with the datapath, so move
3885 	 * everything that's in the linger list into a local list.
3886 	 * This allows us to generate RSTs or free the flow entry
3887 	 * outside the lock.  Any remaining flow entry in the local
3888 	 * list will get re-added back to the head of the linger
3889 	 * list, in front of any new ones added since then.
3890 	 */
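	/*
	 * TAILQ_CONCAT() appends its second argument to its first and leaves
	 * the second empty.  The splice-back at the bottom of this function is
	 * therefore two concatenations (list names below are placeholders):
	 * first
	 *
	 *	TAILQ_CONCAT(&local, &shared, link);
	 *
	 * so that the surviving entries precede any newcomers, then
	 *
	 *	TAILQ_CONCAT(&shared, &local, link);
	 *
	 * to hand the combined list back to the shared head.
	 */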
3891 	lck_mtx_lock(&fsw->fsw_linger_lock);
3892 	TAILQ_CONCAT(&linger_head, &fsw->fsw_linger_head, fe_linger_link);
3893 	ASSERT(TAILQ_EMPTY(&fsw->fsw_linger_head));
3894 	cnt = fsw->fsw_linger_cnt;
3895 	fsw->fsw_linger_cnt = 0;
3896 	lck_mtx_unlock(&fsw->fsw_linger_lock);
3897 
3898 	TAILQ_FOREACH_SAFE(fe, &linger_head, fe_linger_link, tfe) {
3899 		ASSERT(fe->fe_flags & FLOWENTF_TORN_DOWN);
3900 		ASSERT(fe->fe_flags & FLOWENTF_DESTROYED);
3901 		ASSERT(fe->fe_flags & FLOWENTF_LINGERING);
3902 
3903 		/*
3904 		 * See if this is a TCP flow that needs to generate
3905 		 * a RST to the remote peer (if not already).
3906 		 */
3907 		if (flow_track_tcp_want_abort(fe)) {
3908 			VERIFY(fe->fe_flags & FLOWENTF_ABORTED);
3909 			ASSERT(!uuid_is_null(fe->fe_uuid));
3910 			flow_track_abort_tcp(fe, NULL, NULL);
3911 			(*abort)++;
3912 			SK_LOG_VAR(char dbgbuf[FLOWENTRY_DBGBUF_SIZE]);
3913 			SK_DF(SK_VERB_FLOW, "entry \"%s\" fe 0x%llx "
3914 			    "flags 0x%b [RST]", fe_as_string(fe, dbgbuf,
3915 			    sizeof(dbgbuf)), SK_KVA(fe), fe->fe_flags,
3916 			    FLOWENTF_BITS);
3917 		}
3918 
3919 		/*
3920 		 * If flow has expired, remove from list and free;
3921 		 * otherwise leave it around in the linger list.
3922 		 */
3923 		if (fe->fe_linger_expire <= now) {
3924 			freed++;
3925 			fsw_linger_remove_internal(&linger_head, fe);
3926 			fe = NULL;
3927 		}
3928 		++i;
3929 	}
3930 	VERIFY(i == cnt && cnt >= freed);
3931 
3932 	/*
3933 	 * Add any remaining ones back into the linger list.
3934 	 */
3935 	lck_mtx_lock(&fsw->fsw_linger_lock);
3936 	if (!TAILQ_EMPTY(&linger_head)) {
3937 		ASSERT(TAILQ_EMPTY(&fsw->fsw_linger_head) || fsw->fsw_linger_cnt);
3938 		TAILQ_CONCAT(&linger_head, &fsw->fsw_linger_head, fe_linger_link);
3939 		ASSERT(TAILQ_EMPTY(&fsw->fsw_linger_head));
3940 		TAILQ_CONCAT(&fsw->fsw_linger_head, &linger_head, fe_linger_link);
3941 		fsw->fsw_linger_cnt += (cnt - freed);
3942 	}
3943 	ASSERT(TAILQ_EMPTY(&linger_head));
3944 	lck_mtx_unlock(&fsw->fsw_linger_lock);
3945 
3946 	return freed;
3947 }
3948 
3949 __attribute__((always_inline))
3950 static inline void
3951 fsw_ifp_inc_traffic_class_in_pkt(struct ifnet *ifp, kern_packet_t ph)
3952 {
3953 	switch (__packet_get_traffic_class(ph)) {
3954 	case PKT_TC_BE:
3955 		ifp->if_tc.ifi_ibepackets++;
3956 		ifp->if_tc.ifi_ibebytes += SK_PTR_ADDR_KPKT(ph)->pkt_length;
3957 		break;
3958 	case PKT_TC_BK:
3959 		ifp->if_tc.ifi_ibkpackets++;
3960 		ifp->if_tc.ifi_ibkbytes += SK_PTR_ADDR_KPKT(ph)->pkt_length;
3961 		break;
3962 	case PKT_TC_VI:
3963 		ifp->if_tc.ifi_ivipackets++;
3964 		ifp->if_tc.ifi_ivibytes += SK_PTR_ADDR_KPKT(ph)->pkt_length;
3965 		break;
3966 	case PKT_TC_VO:
3967 		ifp->if_tc.ifi_ivopackets++;
3968 		ifp->if_tc.ifi_ivobytes += SK_PTR_ADDR_KPKT(ph)->pkt_length;
3969 		break;
3970 	default:
3971 		break;
3972 	}
3973 }
3974 
3975 __attribute__((always_inline))
3976 static inline void
3977 fsw_ifp_inc_traffic_class_out_pkt(struct ifnet *ifp, uint32_t svc,
3978     uint32_t cnt, uint32_t len)
3979 {
3980 	switch (svc) {
3981 	case PKT_TC_BE:
3982 		ifp->if_tc.ifi_obepackets += cnt;
3983 		ifp->if_tc.ifi_obebytes += len;
3984 		break;
3985 	case PKT_TC_BK:
3986 		ifp->if_tc.ifi_obkpackets += cnt;
3987 		ifp->if_tc.ifi_obkbytes += len;
3988 		break;
3989 	case PKT_TC_VI:
3990 		ifp->if_tc.ifi_ovipackets += cnt;
3991 		ifp->if_tc.ifi_ovibytes += len;
3992 		break;
3993 	case PKT_TC_VO:
3994 		ifp->if_tc.ifi_ovopackets += cnt;
3995 		ifp->if_tc.ifi_ovobytes += len;
3996 		break;
3997 	default:
3998 		break;
3999 	}
4000 }
4001