/* xref: /xnu-8019.80.24/bsd/skywalk/channel/channel_kern.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea) */
/*
 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/kdebug.h>
#include <skywalk/os_skywalk_private.h>
#include <net/ntstat.h>
#include <skywalk/nexus/flowswitch/nx_flowswitch.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <skywalk/nexus/upipe/nx_user_pipe.h>

#define KRING_EMPTY_TX(_kring, _index)  \
	((_kring)->ckr_rhead == (_index))

#define KRING_FULL_RX(_kring, _index)                                   \
	((_kring)->ckr_khead == SLOT_NEXT((_index), (_kring)->ckr_lim))
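
/*
 * Emptiness/fullness are expressed purely in terms of ring indices: a TX
 * ring is exhausted once the candidate slot index reaches ckr_rhead (the
 * head registered by the user side), and an RX ring is full once the slot
 * following the candidate index would wrap onto ckr_khead.
 */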

uint32_t
kern_channel_notify(const kern_channel_ring_t kring, uint32_t flags)
{
#pragma unused(flags)
	if (__improbable(KR_DROP(kring))) {
		return ENXIO;
	}

	return kring->ckr_na_notify(kring, kernproc, 0);
}

uint32_t
kern_channel_reclaim(const kern_channel_ring_t kring)
{
	return kr_reclaim(kring);
}

static inline uint32_t
_kern_channel_available_slot_count_tx(const kern_channel_ring_t kring,
    slot_idx_t index)
{
	ASSERT(kring->ckr_tx == NR_TX);

	if (kring->ckr_rhead < index) {
		return kring->ckr_num_slots + kring->ckr_rhead - index;
	}

	return kring->ckr_rhead - index;
}

static inline uint32_t
_kern_channel_available_slot_count_rx(const kern_channel_ring_t kring,
    slot_idx_t index)
{
	uint32_t busy;
	slot_idx_t lim = kring->ckr_lim;

	ASSERT(kring->ckr_tx == NR_RX);

	if (index < kring->ckr_khead) {
		busy = kring->ckr_num_slots + index - kring->ckr_khead;
	} else {
		busy = index - kring->ckr_khead;
	}

	ASSERT(lim >= busy);
	return lim - busy;
}

uint32_t
kern_channel_available_slot_count(const kern_channel_ring_t kring)
{
	if (kring->ckr_tx == NR_TX) {
		return _kern_channel_available_slot_count_tx(kring,
		           kring->ckr_khead);
	} else {
		return _kern_channel_available_slot_count_rx(kring,
		           kring->ckr_ktail);
	}
}
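
/*
 * Worked example of the arithmetic above (illustrative values only):
 * with ckr_num_slots = 512 (ckr_lim = 511), ckr_khead = 500 and
 * ckr_rhead = 10, a TX ring reports 512 + 10 - 500 = 22 consumable
 * slots.  On the RX side, with ckr_khead = 500 and ckr_ktail = 505,
 * busy = 5 and the ring reports 511 - 5 = 506 slots; one slot is always
 * held back, matching the KRING_FULL_RX() check above.
 */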

kern_channel_slot_t
kern_channel_get_next_slot(const kern_channel_ring_t kring,
    const kern_channel_slot_t slot0, struct kern_slot_prop *prop)
{
	kern_channel_slot_t slot;
	slot_idx_t slot_idx;

	/* Ensure this is only done by the thread doing a sync syscall */
	VERIFY(sk_is_sync_protected());

	if (__improbable(slot0 == NULL)) {
		if (kring->ckr_tx == NR_TX) {
			slot_idx = kring->ckr_khead;
		} else {
			slot_idx = kring->ckr_ktail;
		}
	} else {
		slot_idx = SLOT_NEXT(KR_SLOT_INDEX(kring, slot0),
		    kring->ckr_lim);
	}

	ASSERT(slot_idx < kring->ckr_num_slots);

	if (kring->ckr_tx == NR_TX) {
		if (__improbable(KRING_EMPTY_TX(kring, slot_idx))) {
			SK_DF(SK_VERB_SYNC | SK_VERB_TX,
			    "EMPTY_TX: na \"%s\" kr \"%s\" "
			    "i %u (kc %u kt %u kl %u | rh %u rt %u)",
			    KRNA(kring)->na_name,
			    kring->ckr_name, slot_idx, kring->ckr_khead,
			    kring->ckr_ktail, kring->ckr_klease,
			    kring->ckr_rhead, kring->ckr_rtail);
			slot = NULL;
		} else {
			slot = &kring->ckr_ksds[slot_idx];
		}
	} else {
		if (__improbable(KRING_FULL_RX(kring, slot_idx))) {
			SK_DF(SK_VERB_SYNC | SK_VERB_RX,
			    "FULL_RX: na \"%s\" kr \"%s\" "
			    "i %u (kc %u kt %u kl %u | rh %u rt %u)",
			    KRNA(kring)->na_name,
			    kring->ckr_name, slot_idx, kring->ckr_khead,
			    kring->ckr_ktail, kring->ckr_klease,
			    kring->ckr_rhead, kring->ckr_rtail);
			slot = NULL;
		} else {
			slot = &kring->ckr_ksds[slot_idx];
		}
	}

	if (prop != NULL) {
		bzero(prop, sizeof(*prop));
	}

	return slot;
}

static inline void
_kern_channel_advance_slot_tx(const kern_channel_ring_t kring, slot_idx_t index)
{
	/* Ensure this is only done by the thread doing a sync syscall */
	VERIFY(sk_is_sync_protected());
	kr_txkring_reclaim_and_refill(kring, index);
}

static inline void
_kern_channel_advance_slot_rx(const kern_channel_ring_t kring, slot_idx_t index)
{
	ASSERT(kring->ckr_tx == NR_RX || kring->ckr_tx == NR_EV);
	/* Ensure this is only done by the thread doing a sync syscall */
	VERIFY(sk_is_sync_protected());

	kring->ckr_ktail = SLOT_NEXT(index, kring->ckr_lim);
}

void
kern_channel_advance_slot(const kern_channel_ring_t kring,
    kern_channel_slot_t slot)
{
	slot_idx_t index = KR_SLOT_INDEX(kring, slot);
	ASSERT(index < kring->ckr_num_slots);

	if (kring->ckr_tx == NR_TX) {
		_kern_channel_advance_slot_tx(kring, index);
	} else {
		_kern_channel_advance_slot_rx(kring, index);
	}
}
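
/*
 * Advancing a slot moves the kernel's view of the ring one past the given
 * index: on a TX ring this is done through kr_txkring_reclaim_and_refill(),
 * while on an RX (or event) ring ckr_ktail is bumped just past the slot,
 * which is what makes it visible to the user side on the next sync.
 */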

void *
kern_channel_get_context(const kern_channel_t ch)
{
	return ch->ch_ctx;
}

void *
kern_channel_ring_get_context(const kern_channel_ring_t kring)
{
	return kring->ckr_ctx;
}

errno_t
kern_channel_ring_get_container(const kern_channel_ring_t kring,
    kern_packet_t **array, uint32_t *count)
{
	/* Ensure this is only done by the thread doing a sync syscall */
	VERIFY(sk_is_sync_protected());

	if (array == NULL) {
		return EINVAL;
	}

	*array = kring->ckr_scratch;
	if (count != NULL) {
		*count = na_get_nslots(kring->ckr_na, kring->ckr_tx);
	}

	return 0;
}
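
/*
 * Illustrative use of the scratch container (a minimal sketch; "sc" and
 * "drv_next_tx_packet" are hypothetical driver-side names): a nexus
 * provider may stage up to *count packet handles in the returned array
 * while servicing a sync, e.g.
 *
 *	kern_packet_t *pkts;
 *	uint32_t max, n = 0;
 *
 *	if (kern_channel_ring_get_container(kring, &pkts, &max) == 0) {
 *		while (n < max && (pkts[n] = drv_next_tx_packet(sc)) != 0) {
 *			n++;
 *		}
 *	}
 */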

void *
kern_channel_slot_get_context(const kern_channel_ring_t kring,
    const kern_channel_slot_t slot)
{
	slot_idx_t i = KR_SLOT_INDEX(kring, slot);
	void *slot_ctx = NULL;

	if (kring->ckr_slot_ctxs != NULL) {
		slot_ctx = (void *)(kring->ckr_slot_ctxs[i].slot_ctx_arg);
	}

	return slot_ctx;
}

void
kern_channel_increment_ring_stats(kern_channel_ring_t kring,
    struct kern_channel_ring_stat_increment *stats)
{
	kr_update_stats(kring, stats->kcrsi_slots_transferred,
	    stats->kcrsi_bytes_transferred);
}

void
kern_channel_increment_ring_net_stats(kern_channel_ring_t kring,
    struct ifnet *ifp, struct kern_channel_ring_stat_increment *stats)
{
	if (kring->ckr_tx == NR_TX) {
		atomic_add_64(&ifp->if_data.ifi_opackets,
		    stats->kcrsi_slots_transferred);
		atomic_add_64(&ifp->if_data.ifi_obytes,
		    stats->kcrsi_bytes_transferred);
	} else {
		atomic_add_64(&ifp->if_data.ifi_ipackets,
		    stats->kcrsi_slots_transferred);
		atomic_add_64(&ifp->if_data.ifi_ibytes,
		    stats->kcrsi_bytes_transferred);
	}

	if (ifp->if_data_threshold != 0) {
		ifnet_notify_data_threshold(ifp);
	}

	kr_update_stats(kring, stats->kcrsi_slots_transferred,
	    stats->kcrsi_bytes_transferred);
}
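
/*
 * Typical usage sketch (field names are those consumed above; "npkts",
 * "nbytes" and "ifp" stand for values the caller already has): after
 * moving a batch of packets on a ring, a driver would fill in the
 * increment structure and report it in one call, e.g.
 *
 *	struct kern_channel_ring_stat_increment stats;
 *
 *	bzero(&stats, sizeof(stats));
 *	stats.kcrsi_slots_transferred = npkts;
 *	stats.kcrsi_bytes_transferred = nbytes;
 *	kern_channel_increment_ring_net_stats(kring, ifp, &stats);
 */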

kern_packet_t
kern_channel_slot_get_packet(const kern_channel_ring_t kring,
    const kern_channel_slot_t slot)
{
#if (DEVELOPMENT || DEBUG)
	/* catch invalid slot */
	slot_idx_t idx = KR_SLOT_INDEX(kring, slot);
	struct __kern_slot_desc *ksd = KR_KSD(kring, idx);
#else
#pragma unused(kring)
	struct __kern_slot_desc *ksd = SLOT_DESC_KSD(slot);
#endif /* (DEVELOPMENT || DEBUG) */
	struct __kern_quantum *kqum = ksd->sd_qum;

	if (__improbable(kqum == NULL ||
	    (kqum->qum_qflags & QUM_F_DROPPED) != 0)) {
		return 0;
	}

	return SD_GET_TAGGED_METADATA(ksd);
}
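
/*
 * Sketch of a TX-side walk (illustrative only; how a driver takes
 * ownership of and completes each packet is driver-specific and not
 * shown): within a sync, the user-posted slots can be visited and their
 * packets inspected before the ring is advanced, e.g.
 *
 *	kern_channel_slot_t slot = NULL;
 *	kern_packet_t ph;
 *
 *	while ((slot = kern_channel_get_next_slot(txkring, slot, NULL)) != NULL) {
 *		ph = kern_channel_slot_get_packet(txkring, slot);
 *		if (ph != 0) {
 *			... hand the packet to the hardware here ...
 *		}
 *		kern_channel_advance_slot(txkring, slot);
 *	}
 */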

errno_t
kern_channel_slot_attach_packet(const kern_channel_ring_t kring,
    const kern_channel_slot_t slot, kern_packet_t ph)
{
#if (DEVELOPMENT || DEBUG)
	/* catch invalid slot */
	slot_idx_t idx = KR_SLOT_INDEX(kring, slot);
	struct __kern_slot_desc *ksd = KR_KSD(kring, idx);
#else
#pragma unused(kring)
	struct __kern_slot_desc *ksd = SLOT_DESC_KSD(slot);
#endif /* (DEVELOPMENT || DEBUG) */

	return KR_SLOT_ATTACH_METADATA(kring, ksd, SK_PTR_ADDR_KQUM(ph));
}

errno_t
kern_channel_slot_detach_packet(const kern_channel_ring_t kring,
    const kern_channel_slot_t slot, kern_packet_t ph)
{
#pragma unused(ph)
#if (DEVELOPMENT || DEBUG)
	/* catch invalid slot */
	slot_idx_t idx = KR_SLOT_INDEX(kring, slot);
	struct __kern_slot_desc *ksd = KR_KSD(kring, idx);
#else
	struct __kern_slot_desc *ksd = SLOT_DESC_KSD(slot);
#endif /* (DEVELOPMENT || DEBUG) */

	ASSERT(SK_PTR_ADDR_KQUM(ph) ==
	    SK_PTR_ADDR_KQUM(SD_GET_TAGGED_METADATA(ksd)));
	(void) KR_SLOT_DETACH_METADATA(kring, ksd);

	return 0;
}
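
/*
 * RX fill sketch (illustrative only; "drv_next_rx_packet" and "sc" are
 * hypothetical, and error/overflow handling is omitted): from within a
 * sync on an RX ring, received packets are attached to free slots and
 * published by advancing the ring, e.g.
 *
 *	kern_channel_slot_t slot = NULL;
 *	kern_packet_t ph;
 *
 *	while ((slot = kern_channel_get_next_slot(rxkring, slot, NULL)) != NULL) {
 *		if ((ph = drv_next_rx_packet(sc)) == 0) {
 *			break;
 *		}
 *		(void) kern_channel_slot_attach_packet(rxkring, slot, ph);
 *		kern_channel_advance_slot(rxkring, slot);
 *	}
 */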

static errno_t
kern_channel_tx_refill_common(const kern_channel_ring_t hw_kring,
    uint32_t pkt_limit, uint32_t byte_limit, boolean_t tx_doorbell_ctxt,
    boolean_t *pkts_pending, boolean_t canblock)
{
#pragma unused(tx_doorbell_ctxt)
	struct nexus_adapter *hwna;
	struct ifnet *ifp;
	sk_protect_t protect;
	errno_t rc = 0;
	errno_t sync_err = 0;

	KDBG((SK_KTRACE_CHANNEL_TX_REFILL | DBG_FUNC_START), SK_KVA(hw_kring));

	VERIFY(hw_kring != NULL);
	hwna = KRNA(hw_kring);
	ifp = hwna->na_ifp;

	ASSERT(hwna->na_type == NA_NETIF_DEV);
	ASSERT(hw_kring->ckr_tx == NR_TX);
	*pkts_pending = FALSE;

	if (__improbable(pkt_limit == 0 || byte_limit == 0)) {
		SK_ERR("invalid limits plim %d, blim %d",
		    pkt_limit, byte_limit);
		rc = EINVAL;
		goto out;
	}

	if (__improbable(!IF_FULLY_ATTACHED(ifp))) {
		SK_ERR("hwna 0x%llx ifp %s (0x%llx), interface not attached",
		    SK_KVA(hwna), if_name(ifp), SK_KVA(ifp));
		rc = ENXIO;
		goto out;
	}

	if (__improbable((ifp->if_start_flags & IFSF_FLOW_CONTROLLED) != 0)) {
		SK_DF(SK_VERB_SYNC | SK_VERB_TX, "hwna 0x%llx ifp %s (0x%llx), "
		    "flow control ON", SK_KVA(hwna), if_name(ifp), SK_KVA(ifp));
		rc = ENXIO;
		goto out;
	}

	/*
	 * if the ring is busy, it means another dequeue is in
	 * progress, so ignore this request and return success.
	 */
	if (kr_enter(hw_kring, canblock) != 0) {
		rc = 0;
		goto out;
	}

	if (__improbable(KR_DROP(hw_kring) ||
	    !NA_IS_ACTIVE(hw_kring->ckr_na))) {
		kr_exit(hw_kring);
		SK_ERR("hw-kr 0x%llx stopped", SK_KVA(hw_kring));
		rc = ENXIO;
		goto out;
	}

	/*
	 * Unlikely to get here, unless a channel is opened by
	 * a user process directly to the netif.  Issue a TX sync
	 * on the netif device TX ring.
	 */
	protect = sk_sync_protect();
	sync_err = hw_kring->ckr_na_sync(hw_kring, kernproc,
	    NA_SYNCF_NETIF);
	sk_sync_unprotect(protect);
	kr_exit(hw_kring);

	if (rc == 0) {
		rc = sync_err;
	}

out:
	KDBG((SK_KTRACE_CHANNEL_TX_REFILL | DBG_FUNC_END), SK_KVA(hw_kring),
	    rc, 0, 0);

	return rc;
}

errno_t
kern_channel_tx_refill(const kern_channel_ring_t hw_kring,
    uint32_t pkt_limit, uint32_t byte_limit, boolean_t tx_doorbell_ctxt,
    boolean_t *pkts_pending)
{
	if (NA_OWNED_BY_FSW(hw_kring->ckr_na)) {
		return netif_ring_tx_refill(hw_kring, pkt_limit,
		           byte_limit, tx_doorbell_ctxt, pkts_pending, FALSE);
	} else {
		return kern_channel_tx_refill_common(hw_kring, pkt_limit,
		           byte_limit, tx_doorbell_ctxt, pkts_pending, FALSE);
	}
}

errno_t
kern_channel_tx_refill_canblock(const kern_channel_ring_t hw_kring,
    uint32_t pkt_limit, uint32_t byte_limit, boolean_t tx_doorbell_ctxt,
    boolean_t *pkts_pending)
{
	if (NA_OWNED_BY_FSW(hw_kring->ckr_na)) {
		return netif_ring_tx_refill(hw_kring, pkt_limit,
		           byte_limit, tx_doorbell_ctxt, pkts_pending, TRUE);
	} else {
		return kern_channel_tx_refill_common(hw_kring, pkt_limit,
		           byte_limit, tx_doorbell_ctxt, pkts_pending, TRUE);
	}
}

errno_t
kern_channel_get_service_class(const kern_channel_ring_t kring,
    kern_packet_svc_class_t *svc)
{
	if ((KRNA(kring)->na_type != NA_NETIF_DEV) ||
	    (kring->ckr_tx == NR_RX) || (kring->ckr_svc == KPKT_SC_UNSPEC)) {
		return ENOTSUP;
	}
	*svc = kring->ckr_svc;
	return 0;
}
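
/*
 * Sketch (hypothetical driver names): a netif driver can use the service
 * class of a device TX ring to pick a matching hardware queue, e.g.
 *
 *	kern_packet_svc_class_t svc;
 *
 *	if (kern_channel_get_service_class(txkring, &svc) == 0) {
 *		hwq = drv_hw_queue_for_svc(sc, svc);
 *	}
 */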

void
kern_channel_flowadv_clear(struct flowadv_fcentry *fce)
{
	const flowadv_token_t ch_token = fce->fce_flowsrc_token;
	const flowadv_token_t flow_token = fce->fce_flowid;
	const flowadv_idx_t flow_fidx = fce->fce_flowsrc_fidx;
	struct ifnet *ifp = fce->fce_ifp;
	struct nexus_adapter *hwna;
	struct kern_nexus *fsw_nx;
	struct kern_channel *ch = NULL;
	struct nx_flowswitch *fsw;

	_CASSERT(sizeof(ch->ch_info->cinfo_ch_token) == sizeof(ch_token));

	SK_LOCK();
	if (ifnet_is_attached(ifp, 0) == 0 || ifp->if_na == NULL) {
		goto done;
	}

	hwna = &ifp->if_na->nifna_up;
	VERIFY((hwna->na_type == NA_NETIF_DEV) ||
	    (hwna->na_type == NA_NETIF_COMPAT_DEV));

	if (!NA_IS_ACTIVE(hwna) || (fsw = fsw_ifp_to_fsw(ifp)) == NULL) {
		goto done;
	}

	fsw_nx = fsw->fsw_nx;
	VERIFY(fsw_nx != NULL);

	/* find the channel */
	STAILQ_FOREACH(ch, &fsw_nx->nx_ch_head, ch_link) {
		if (ch_token == ch->ch_info->cinfo_ch_token) {
			break;
		}
	}

	if (ch != NULL) {
		if (ch->ch_na != NULL &&
		    na_flowadv_clear(ch, flow_fidx, flow_token)) {
			/* trigger flow advisory kevent */
			na_flowadv_event(
				&ch->ch_na->na_tx_rings[ch->ch_first[NR_TX]]);
			SK_DF(SK_VERB_FLOW_ADVISORY,
			    "%s(%d) notified of flow update",
			    ch->ch_name, ch->ch_pid);
		} else if (ch->ch_na == NULL) {
			SK_DF(SK_VERB_FLOW_ADVISORY,
			    "%s(%d) is closing (flow update ignored)",
			    ch->ch_name, ch->ch_pid);
		}
	} else {
		SK_ERR("channel token 0x%x fidx %u on %s not found",
		    ch_token, flow_fidx, ifp->if_xname);
	}
done:
	SK_UNLOCK();
}

void
kern_channel_memstatus(struct proc *p, uint32_t status,
    struct kern_channel *ch)
{
#pragma unused(p, status)
	SK_LOCK_ASSERT_NOTHELD();

	ASSERT(!(ch->ch_flags & CHANF_KERNEL));
	ASSERT(proc_pid(p) == ch->ch_pid);
	/*
	 * If we're already draining, then bail.  Otherwise, check it
	 * again via na_drain() with the channel lock held.
	 */
	if (ch->ch_na->na_flags & NAF_DRAINING) {
		return;
	}

	SK_DF(SK_VERB_CHANNEL, "%s(%d) ch 0x%llx flags 0x%b status %s",
	    sk_proc_name_address(p), sk_proc_pid(p), SK_KVA(ch),
	    ch->ch_flags, CHANF_BITS, sk_memstatus2str(status));

	/* serialize accesses against channel syscalls */
	lck_mtx_lock(&ch->ch_lock);
	na_drain(ch->ch_na, TRUE);   /* purge caches */
	lck_mtx_unlock(&ch->ch_lock);
}

static bool
_kern_channel_defunct_eligible(struct kern_channel *ch)
{
	struct nexus_upipe_adapter *pna;

	if ((ch->ch_info->cinfo_ch_mode & CHMODE_DEFUNCT_OK) == 0) {
		return false;
	}
	if (ch->ch_na->na_type != NA_USER_PIPE) {
		return true;
	}
	pna = (struct nexus_upipe_adapter *)ch->ch_na;
	if ((pna->pna_parent->na_flags & NAF_DEFUNCT_OK) == 0) {
		return false;
	}
	return true;
}

void
kern_channel_defunct(struct proc *p, struct kern_channel *ch)
{
#pragma unused(p)
	uint32_t ch_mode = ch->ch_info->cinfo_ch_mode;

	SK_LOCK_ASSERT_NOTHELD();

	ASSERT(!(ch->ch_flags & CHANF_KERNEL));
	ASSERT(proc_pid(p) == ch->ch_pid);
	/*
	 * If the channel is eligible for defunct, mark it as such.
	 * Otherwise, set the draining flag, which tells the reaper
	 * thread to purge any cached objects associated with it.
	 * The draining flag is cleared afterwards, which allows the
	 * channel to cache objects again once the process is resumed.
	 */
	if (_kern_channel_defunct_eligible(ch)) {
		struct kern_nexus *nx = ch->ch_nexus;
		struct kern_nexus_domain_provider *nxdom_prov = NX_DOM_PROV(nx);
		boolean_t need_defunct;
		int err;

		/*
		 * This may be called often, so check first (without lock) if
		 * the trapdoor flag CHANF_DEFUNCT has been set and bail if so,
		 * for performance reasons.  This check is repeated below with
		 * the channel lock held.
		 */
		if (ch->ch_flags & CHANF_DEFUNCT) {
			return;
		}

		SK_DF(SK_VERB_CHANNEL, "%s(%d) ch 0x%llx flags 0x%b",
		    sk_proc_name_address(p), sk_proc_pid(p), SK_KVA(ch),
		    ch->ch_flags, CHANF_BITS);

		/* serialize accesses against channel syscalls */
		lck_mtx_lock(&ch->ch_lock);

		/*
		 * If opportunistic defunct is in effect, skip the rest of
		 * the defunct work based on two cases:
		 *
		 *   a) if the channel isn't using user packet pool; or
		 *   b) if the channel is using user packet pool and we
		 *      detect that there are outstanding allocations.
		 *
		 * Note that for case (a) above we essentially treat the
		 * channel as ineligible for defunct, and although it may
		 * be idle we'd leave the memory mapping intact.  This
		 * should not be a concern as the majority of channels are
		 * on flowswitches where user packet pool is mandatory.
		 *
		 * If skipping, mark the channel with CHANF_DEFUNCT_SKIP
		 * and increment the stats (for flowswitch only).
		 */
		if (sk_opp_defunct && (!(ch_mode & CHMODE_USER_PACKET_POOL) ||
		    !pp_isempty_upp(ch->ch_pp))) {
			if (ch->ch_na->na_type == NA_FLOWSWITCH_VP) {
				struct nx_flowswitch *fsw =
				    VPNA(ch->ch_na)->vpna_fsw;
				STATS_INC(&fsw->fsw_stats,
				    FSW_STATS_CHAN_DEFUNCT_SKIP);
			}
			(void) atomic_bitset_32_ov(&ch->ch_flags,
			    CHANF_DEFUNCT_SKIP);
			/* skip defunct */
			lck_mtx_unlock(&ch->ch_lock);
			return;
		}
		(void) atomic_bitclear_32(&ch->ch_flags, CHANF_DEFUNCT_SKIP);

		/*
		 * Proceed with the rest of the defunct work.
		 */
		if (atomic_bitset_32_ov(&ch->ch_flags, CHANF_DEFUNCT) &
		    CHANF_DEFUNCT) {
			/* already defunct; nothing to do */
			lck_mtx_unlock(&ch->ch_lock);
			return;
		}

		/* mark this channel as inactive */
		ch_deactivate(ch);

		/*
		 * Redirect memory regions for the map; upon success, instruct
		 * the nexus to finalize the defunct and tear down the
		 * respective memory regions.  It's crucial that the redirection
		 * happens before freeing the objects, since the page protection
		 * flags get inherited only from unfreed segments.  Freed ones
		 * will cause VM_PROT_NONE to be used for the segment span, to
		 * catch use-after-free cases.  For unfreed objects, doing so
		 * may cause an exception when the process is later resumed
		 * and touches an address within the span; hence the ordering.
		 */
		if ((err = skmem_arena_mredirect(ch->ch_na->na_arena,
		    &ch->ch_mmap, p, &need_defunct)) == 0 && need_defunct) {
			/*
			 * Let the domain provider handle the initial tasks of
			 * the defunct that are specific to this channel.  It
			 * may safely free objects as the redirection is done.
			 */
			nxdom_prov->nxdom_prov_dom->nxdom_defunct(nxdom_prov,
			    nx, ch, p);
			/*
			 * Let the domain provider complete the defunct;
			 * do this after dropping the channel lock, as
			 * the nexus may end up acquiring other locks
			 * that would otherwise violate lock ordering.
			 * The channel refcnt is still held by virtue
			 * of the caller holding the process's file
			 * table lock.
			 */
			lck_mtx_unlock(&ch->ch_lock);
			nxdom_prov->nxdom_prov_dom->nxdom_defunct_finalize(
				nxdom_prov, nx, ch, FALSE);
		} else if (err == 0) {
			/*
			 * Let the domain provider handle the initial tasks of
			 * the defunct that are specific to this channel.  It
			 * may safely free objects as the redirection is done.
			 */
			nxdom_prov->nxdom_prov_dom->nxdom_defunct(nxdom_prov,
			    nx, ch, p);
			lck_mtx_unlock(&ch->ch_lock);
		} else {
			/* already redirected; nothing to do */
			lck_mtx_unlock(&ch->ch_lock);
		}
	} else {
		lck_mtx_lock(&ch->ch_lock);
		na_drain(ch->ch_na, FALSE);  /* prune caches */
		lck_mtx_unlock(&ch->ch_lock);
	}
}
691