/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/fsw_var.h>
#include <skywalk/nexus/flowswitch/flow/flow_var.h>

static uint32_t flow_owner_bucket_purge_common(struct flow_owner_bucket *,
    nexus_port_t, boolean_t);
static int fo_cmp(const struct flow_owner *, const struct flow_owner *);
static struct flow_owner *fo_alloc(boolean_t);
static void fo_free(struct flow_owner *);

static LCK_GRP_DECLARE(flow_owner_lock_group, "sk_flow_owner_lock");
static LCK_ATTR_DECLARE(flow_owner_lock_attr, 0, 0);

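/*
 * Flow owners within a bucket are kept in a red-black tree ordered by
 * fo_cmp() below: (fo_pid, fo_context, fo_low_latency).  The same
 * process may therefore appear more than once, provided the context
 * or the low-latency attribute differs.
 */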
RB_GENERATE_PREV(flow_owner_tree, flow_owner, fo_link, fo_cmp);

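/*
 * Illustrative alloc/free pairing (the caller code below is a sketch,
 * not from this file); the per-index accessor is flow_mgr_get_fob_at_idx():
 *
 *	size_t fob_sz, tot_sz;
 *	struct flow_owner_bucket *fob;
 *
 *	fob = flow_owner_buckets_alloc(fob_cnt, &fob_sz, &tot_sz);
 *	...
 *	flow_owner_buckets_free(fob, tot_sz);
 *
 * Note that the free side takes tot_sz (the padded allocation size),
 * not fob_cnt * fob_sz.
 */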
struct flow_owner_bucket *
flow_owner_buckets_alloc(size_t fob_cnt, size_t *fob_sz, size_t *tot_sz)
{
	size_t cache_sz = skmem_cpu_cache_line_size();
	struct flow_owner_bucket *fob;
	void *fob_buf, **fob_pbuf;
	size_t fob_tot_sz;

	/* each bucket is CPU cache-aligned */
	*fob_sz = P2ROUNDUP(sizeof(*fob), cache_sz);

	/* total size includes extra for alignment requirements */
	*tot_sz = fob_tot_sz = (sizeof(void *) + (fob_cnt * (*fob_sz)) + cache_sz);
	fob_buf = sk_alloc(fob_tot_sz, Z_WAITOK, skmem_tag_fsw_fob_hash);
	if (__improbable(fob_buf == NULL)) {
		return NULL;
	}

	/*
	 * In case we didn't get cache-aligned memory, round the address
	 * up accordingly.  This is needed in order to get the rest of
	 * the structure members aligned properly.  It also means that
	 * the memory span gets shifted by the round-up, which is fine
	 * since we allocated extra space for exactly this.
	 */
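	/*
	 * Worked example, assuming cache_sz == 64: if sk_alloc() returns
	 * fob_buf == 0x...1008, then fob == P2ROUNDUP(0x...1010, 64) ==
	 * 0x...1040 and fob_pbuf == 0x...1038.  The original pointer is
	 * stashed at fob_pbuf so flow_owner_buckets_free() can recover it.
	 */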
	fob = (struct flow_owner_bucket *)
	    P2ROUNDUP((intptr_t)fob_buf + sizeof(void *), cache_sz);
	fob_pbuf = (void **)((intptr_t)fob - sizeof(void *));
	ASSERT((intptr_t)fob_pbuf >= (intptr_t)fob_buf);
	ASSERT(((intptr_t)fob + (fob_cnt * (*fob_sz))) <=
	    ((intptr_t)fob_buf + fob_tot_sz));
	*fob_pbuf = fob_buf;

	SK_DF(SK_VERB_MEM, "fob 0x%llx fob_cnt %zu fob_sz %zu "
	    "(total %zu bytes, fob_buf 0x%llx) ALLOC", SK_KVA(fob), fob_cnt,
	    *fob_sz, fob_tot_sz, SK_KVA(fob_buf));

	return fob;
}

void
flow_owner_buckets_free(struct flow_owner_bucket *fob, size_t tot_sz)
{
	void *fob_buf, **fob_pbuf;

	/* get the original address that we stuffed in earlier and free it */
	fob_pbuf = (void **)((intptr_t)fob - sizeof(void *));
	fob_buf = *fob_pbuf;
	SK_DF(SK_VERB_MEM, "fob 0x%llx (fob_buf 0x%llx) FREE", SK_KVA(fob),
	    SK_KVA(fob_buf));
	sk_free(fob_buf, tot_sz);
}

void
flow_owner_bucket_init(struct flow_owner_bucket *fob)
{
	ASSERT(IS_P2ALIGNED(fob, skmem_cpu_cache_line_size()));
	lck_mtx_init(&fob->fob_lock, &flow_owner_lock_group,
	    &flow_owner_lock_attr);
	RB_INIT(&fob->fob_owner_head);
}

void
flow_owner_bucket_destroy(struct flow_owner_bucket *fob)
{
	/*
	 * In the event we are called as part of the nexus destructor,
	 * we need to wait until all threads have exited the flow close
	 * critical section and the flow_owner_bucket is empty.  By the
	 * time we get here, the module initiating the request (e.g.
	 * NECP) has been quiesced, so any flow open requests would
	 * have been rejected.
	 */
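	/*
	 * The matching wakeup on fob_dtor_waiters is issued by
	 * flow_owner_free() once the last owner is removed from the
	 * tree; fob_close_waiters is signalled by the flow close path
	 * when it drops FOBF_CLOSE_BUSY (not in this file).
	 */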
	FOB_LOCK(fob);
	while (!RB_EMPTY(&fob->fob_owner_head)) {
		SK_ERR("waiting for fob 0x%llx to go idle", SK_KVA(fob));
		if (++(fob->fob_dtor_waiters) == 0) {   /* wraparound */
			fob->fob_dtor_waiters++;
		}
		(void) msleep(&fob->fob_dtor_waiters, &fob->fob_lock,
		    (PZERO - 1), __FUNCTION__, NULL);
	}
	while (fob->fob_busy_flags & FOBF_CLOSE_BUSY) {
		if (++(fob->fob_close_waiters) == 0) {  /* wraparound */
			fob->fob_close_waiters++;
		}
		(void) msleep(&fob->fob_close_waiters, &fob->fob_lock,
		    (PZERO - 1), __FUNCTION__, NULL);
	}
	ASSERT(RB_EMPTY(&fob->fob_owner_head));
	ASSERT(!(fob->fob_busy_flags & FOBF_OPEN_BUSY));
	ASSERT(!(fob->fob_busy_flags & FOBF_CLOSE_BUSY));
	FOB_UNLOCK(fob);
	lck_mtx_destroy(&fob->fob_lock, &flow_owner_lock_group);
}

static uint32_t
flow_owner_bucket_purge_common(struct flow_owner_bucket *fob,
    nexus_port_t nx_port, boolean_t if_idle)
{
	/*
	 * When called via flow_owner_bucket_purge_all() (nx_port is
	 * NEXUS_PORT_ANY), the caller already holds the bucket lock.
	 */
	boolean_t locked = (nx_port == NEXUS_PORT_ANY);
	struct flow_owner *fo, *tfo;
	struct flow_entry *fe, *tfe;
	uint32_t cnt = 0;

	if (!locked) {
		FOB_LOCK(fob);
	}
	FOB_LOCK_ASSERT_HELD(fob);

	RB_FOREACH_SAFE(fo, flow_owner_tree, &fob->fob_owner_head, tfo) {
		if (fo->fo_nx_port != nx_port && nx_port != NEXUS_PORT_ANY) {
			continue;
		}

		if (!if_idle || nx_port == NEXUS_PORT_ANY) {
			RB_FOREACH_SAFE(fe, flow_entry_id_tree,
			    &fo->fo_flow_entry_id_head, tfe) {
				ASSERT(fe->fe_nx_port == fo->fo_nx_port);
				flow_entry_retain(fe);
				flow_entry_destroy(fo, fe, FALSE, NULL);
			}
		}

		ASSERT(nx_port != NEXUS_PORT_ANY ||
		    RB_EMPTY(&fo->fo_flow_entry_id_head));

		if (RB_EMPTY(&fo->fo_flow_entry_id_head)) {
			flow_owner_free(fob, fo);
			++cnt;
		} else if (nx_port != NEXUS_PORT_ANY) {
			/* let ms_flow_unbind() know this port is gone */
			fo->fo_nx_port_destroyed = TRUE;
			VERIFY(fo->fo_nx_port_na == NULL);
		}
	}

	if (!locked) {
		FOB_UNLOCK(fob);
	}

	return cnt;
}

void
flow_owner_bucket_purge_all(struct flow_owner_bucket *fob)
{
	(void) flow_owner_bucket_purge_common(fob, NEXUS_PORT_ANY, TRUE);
}

static uint32_t
flow_owner_bucket_activate_nx_port_common(struct flow_owner_bucket *fob,
    nexus_port_t nx_port, struct nexus_adapter *nx_port_na,
    na_activate_mode_t mode)
{
	struct flow_owner *fo;
	struct flow_entry *fe;
	uint32_t cnt = 0;

	VERIFY(nx_port != NEXUS_PORT_ANY);
	FOB_LOCK(fob);

	RB_FOREACH(fo, flow_owner_tree, &fob->fob_owner_head) {
		if (fo->fo_nx_port_destroyed || (fo->fo_nx_port != nx_port)) {
			continue;
		}

		if (mode == NA_ACTIVATE_MODE_ON) {
			VERIFY(fo->fo_nx_port_na == NULL);
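			/* const override */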
			*(struct nexus_adapter **)(uintptr_t)&fo->fo_nx_port_na = nx_port_na;
		}

		RB_FOREACH(fe, flow_entry_id_tree,
		    &fo->fo_flow_entry_id_head) {
			if (fe->fe_flags & FLOWENTF_TORN_DOWN) {
				continue;
			}
			VERIFY(fe->fe_nx_port == fo->fo_nx_port);
			if (fe->fe_adv_idx != FLOWADV_IDX_NONE) {
				if (mode == NA_ACTIVATE_MODE_ON) {
					na_flowadv_entry_alloc(fo->fo_nx_port_na,
					    fe->fe_uuid, fe->fe_adv_idx);
				} else if (fo->fo_nx_port_na != NULL) {
					na_flowadv_entry_free(fo->fo_nx_port_na,
					    fe->fe_uuid, fe->fe_adv_idx);
				}
			}
		}

		if (mode != NA_ACTIVATE_MODE_ON && fo->fo_nx_port_na != NULL) {
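			/* const override */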
			*(struct nexus_adapter **)(uintptr_t)&fo->fo_nx_port_na = NULL;
		}

		++cnt;
	}

	FOB_UNLOCK(fob);
	return cnt;
}

uint32_t
flow_owner_activate_nexus_port(struct flow_mgr *fm,
    boolean_t pid_bound, pid_t pid, nexus_port_t nx_port,
    struct nexus_adapter *nx_port_na, na_activate_mode_t mode)
{
	struct flow_owner_bucket *fob;
	uint32_t fo_cnt = 0;

	VERIFY(nx_port != NEXUS_PORT_ANY);
	VERIFY(nx_port_na != NULL);

	if (pid_bound) {
		fob = flow_mgr_get_fob_by_pid(fm, pid);
		fo_cnt = flow_owner_bucket_activate_nx_port_common(fob, nx_port,
		    nx_port_na, mode);
	} else {
		uint32_t i;
		/*
		 * Otherwise, this can get expensive since we need to search
		 * through all proc-mapping buckets to find the flows that
		 * are related to this nexus port.
		 */
		for (i = 0; i < fm->fm_owner_buckets_cnt; i++) {
			fob = flow_mgr_get_fob_at_idx(fm, i);
			fo_cnt += flow_owner_bucket_activate_nx_port_common(fob,
			    nx_port, nx_port_na, mode);
		}
	}
	/* there shouldn't be more than one flow owner on a nexus port */
	VERIFY(fo_cnt <= 1);
	return fo_cnt;
}

static void
flow_owner_bucket_attach_common(struct flow_owner_bucket *fob,
    nexus_port_t nx_port)
{
	struct flow_owner *fo;

	VERIFY(nx_port != NEXUS_PORT_ANY);
	FOB_LOCK(fob);

	RB_FOREACH(fo, flow_owner_tree, &fob->fob_owner_head) {
		if (fo->fo_nx_port_destroyed && (fo->fo_nx_port == nx_port)) {
			fo->fo_nx_port_destroyed = FALSE;
		}
	}

	FOB_UNLOCK(fob);
}

void
flow_owner_attach_nexus_port(struct flow_mgr *fm, boolean_t pid_bound,
    pid_t pid, nexus_port_t nx_port)
{
	struct flow_owner_bucket *fob;
	ASSERT(nx_port != NEXUS_PORT_ANY);

	if (pid_bound) {
		fob = flow_mgr_get_fob_by_pid(fm, pid);
		flow_owner_bucket_attach_common(fob, nx_port);
	} else {
		uint32_t i;
		/*
		 * Otherwise, this can get expensive since we need to search
		 * through all proc-mapping buckets to find the flows that
		 * are related to this nexus port.
		 */
		for (i = 0; i < fm->fm_owner_buckets_cnt; i++) {
			fob = flow_mgr_get_fob_at_idx(fm, i);
			flow_owner_bucket_attach_common(fob, nx_port);
		}
	}
}

uint32_t
flow_owner_detach_nexus_port(struct flow_mgr *fm, boolean_t pid_bound,
    pid_t pid, nexus_port_t nx_port, boolean_t if_idle)
{
	struct flow_owner_bucket *fob;
	uint32_t purged = 0;
	ASSERT(nx_port != NEXUS_PORT_ANY);

	if (pid_bound) {
		fob = flow_mgr_get_fob_by_pid(fm, pid);
		purged = flow_owner_bucket_purge_common(fob, nx_port, if_idle);
	} else {
		uint32_t i;
		/*
		 * Otherwise, this can get expensive since we need to search
		 * through all proc-mapping buckets to find the flows that
		 * are related to this nexus port.
		 */
		for (i = 0; i < fm->fm_owner_buckets_cnt; i++) {
			fob = flow_mgr_get_fob_at_idx(fm, i);
			purged += flow_owner_bucket_purge_common(fob,
			    nx_port, if_idle);
		}
	}
	return purged;
}

/* 64-bit mask with range */
#define FO_BMASK64(_beg, _end)  \
	((((uint64_t)0xffffffffffffffff) >>     \
	    (63 - (_end))) & ~((1ULL << (_beg)) - 1))
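
/*
 * Examples: FO_BMASK64(0, 63) == ~0ULL (all 64 bits set);
 * FO_BMASK64(0, 5) == 0x3f (bits 0..5); FO_BMASK64(2, 5) == 0x3c.
 */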

struct flow_owner *
flow_owner_alloc(struct flow_owner_bucket *fob, struct proc *p,
    nexus_port_t nx_port, bool nx_port_pid_bound, bool flowadv,
    struct nx_flowswitch *fsw, struct nexus_adapter *nx_port_na,
    void *context, bool low_latency)
{
	struct flow_owner *fo;
	const pid_t pid = proc_pid(p);

	_CASSERT(true == 1);
	_CASSERT(false == 0);
	ASSERT(low_latency == true || low_latency == false);
	ASSERT(nx_port != NEXUS_PORT_ANY);
	FOB_LOCK_ASSERT_HELD(fob);

#if DEBUG
	ASSERT(flow_owner_find_by_pid(fob, pid, context, low_latency) == NULL);
	RB_FOREACH(fo, flow_owner_tree, &fob->fob_owner_head) {
		if (!fo->fo_nx_port_destroyed && (fo->fo_nx_port == nx_port)) {
			VERIFY(0);
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}
#endif /* DEBUG */

	fo = fo_alloc(TRUE);
	if (fo != NULL) {
		if (flowadv) {
			uint32_t i;

			if ((fo->fo_flowadv_bmap =
			    skmem_cache_alloc(sk_fab_cache, SKMEM_SLEEP)) == NULL) {
				SK_ERR("failed to alloc flow advisory bitmap");
				fo_free(fo);
				return NULL;
			}
			bzero(fo->fo_flowadv_bmap, sk_fab_size);
			fo->fo_flowadv_max = sk_max_flows;

			/* set the bits for free indices */
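			/*
			 * Each chunk covers FO_FLOWADV_CHUNK (64 bits per
			 * bitmap_t) indices.  E.g. with sk_max_flows == 96,
			 * chunk 0 gets bits 0..63 set and chunk 1 gets bits
			 * 0..31, i.e. 96 free indices in total.
			 */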
			for (i = 0; i < sk_fadv_nchunks; i++) {
				uint32_t end = 63;

				if (i == (sk_fadv_nchunks - 1)) {
					end = ((sk_max_flows - 1) %
					    FO_FLOWADV_CHUNK);
				}

				fo->fo_flowadv_bmap[i] = FO_BMASK64(0, end);
			}
		}
		RB_INIT(&fo->fo_flow_entry_id_head);
		/* const override */
		*(struct flow_owner_bucket **)(uintptr_t)&fo->fo_bucket = fob;
		fo->fo_context = context;
		fo->fo_pid = pid;
		(void) snprintf(fo->fo_name, sizeof(fo->fo_name), "%s",
		    proc_name_address(p));
		fo->fo_nx_port_pid_bound = nx_port_pid_bound;
		fo->fo_low_latency = low_latency;
		fo->fo_nx_port = nx_port;
		*(struct nexus_adapter **)(uintptr_t)&fo->fo_nx_port_na = nx_port_na;
		*(struct nx_flowswitch **)(uintptr_t)&fo->fo_fsw = fsw;
		RB_INSERT(flow_owner_tree, &fob->fob_owner_head, fo);

		SK_DF(SK_VERB_FLOW, "%s(%d) fob 0x%llx added fo 0x%llx "
		    "nx_port %d nx_port_pid_bound %d ll %d nx_port_na 0x%llx",
		    fo->fo_name, fo->fo_pid, SK_KVA(fob), SK_KVA(fo),
		    (int)nx_port, nx_port_pid_bound, fo->fo_low_latency,
		    SK_KVA(nx_port_na));
	}

	return fo;
}

void
flow_owner_free(struct flow_owner_bucket *fob, struct flow_owner *fo)
{
	FOB_LOCK_ASSERT_HELD(fob);

	ASSERT(fo->fo_bucket == fob);
	*(struct flow_owner_bucket **)(uintptr_t)&fo->fo_bucket = NULL;
	RB_REMOVE(flow_owner_tree, &fob->fob_owner_head, fo);

	ASSERT(fo->fo_num_flowadv == 0);
	/* only present if flow advisory was requested at alloc time */
	if (fo->fo_flowadv_bmap != NULL) {
		skmem_cache_free(sk_fab_cache, fo->fo_flowadv_bmap);
		fo->fo_flowadv_bmap = NULL;
	}

	/* wake up any thread blocked in flow_owner_bucket_destroy() */
	if (RB_EMPTY(&fob->fob_owner_head) && fob->fob_dtor_waiters > 0) {
		fob->fob_dtor_waiters = 0;
		wakeup(&fob->fob_dtor_waiters);
	}

	SK_DF(SK_VERB_FLOW, "%s(%d) fob 0x%llx removed fo 0x%llx nx_port %d",
	    fo->fo_name, fo->fo_pid, SK_KVA(fob), SK_KVA(fo),
	    (int)fo->fo_nx_port);

	fo_free(fo);
}

int
flow_owner_flowadv_index_alloc(struct flow_owner *fo, flowadv_idx_t *fadv_idx)
{
	bitmap_t *bmap = fo->fo_flowadv_bmap;
	size_t nchunks, i, j, idx = FLOWADV_IDX_NONE;

	FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));
	ASSERT(fo->fo_flowadv_max != 0);

	nchunks = P2ROUNDUP(fo->fo_flowadv_max, FO_FLOWADV_CHUNK) /
	    FO_FLOWADV_CHUNK;

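	/*
	 * A set bit means the index is free.  ffsll() returns the
	 * one-based position of the least significant set bit (0 if
	 * none), so each chunk is scanned in constant time.
	 */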
	for (i = 0; i < nchunks; i++) {
		j = ffsll(bmap[i]);
		if (j == 0) {
			/* All indices in this chunk are in use */
			continue;
		}
		--j;
		/* mark the index as in use */
		bit_clear(bmap[i], j);
		idx = (i * FO_FLOWADV_CHUNK) + j;
		break;
	}

	if (idx == FLOWADV_IDX_NONE) {
		SK_ERR("%s(%d) flow advisory table full: num %u max %u",
		    fo->fo_name, fo->fo_pid, fo->fo_num_flowadv,
		    fo->fo_flowadv_max);
		VERIFY(fo->fo_num_flowadv == fo->fo_flowadv_max);
		*fadv_idx = FLOWADV_IDX_NONE;
		return ENOSPC;
	}

	fo->fo_num_flowadv++;
	ASSERT(idx < ((flowadv_idx_t) -1));
	*fadv_idx = (flowadv_idx_t)idx;
	ASSERT(*fadv_idx < fo->fo_flowadv_max);
	return 0;
}

void
flow_owner_flowadv_index_free(struct flow_owner *fo, flowadv_idx_t fadv_idx)
{
	uint32_t chunk_idx, bit_pos;
	bitmap_t *bmap = fo->fo_flowadv_bmap;

	FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));
	ASSERT(fo->fo_num_flowadv != 0);
	ASSERT((fo->fo_flowadv_max != 0) && (fadv_idx < fo->fo_flowadv_max));

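	/* locate the chunk and bit position for this index */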
	chunk_idx = fadv_idx / FO_FLOWADV_CHUNK;
	bit_pos = fadv_idx % FO_FLOWADV_CHUNK;
	ASSERT(!bit_test(bmap[chunk_idx], bit_pos));
	/* mark the index as free */
	bit_set(bmap[chunk_idx], bit_pos);
	fo->fo_num_flowadv--;
}

int
flow_owner_destroy_entry(struct flow_owner *fo, uuid_t uuid,
    bool nolinger, void *close_params)
{
	struct flow_entry *fe = NULL;
	int err = 0;

	FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));

	/* look up the flow for this process */
	fe = flow_entry_find_by_uuid(fo, uuid);
	if (fe == NULL) {
		err = ENOENT;
	} else {
		/* free the flow entry (OK to linger unless caller said no) */
		flow_entry_destroy(fo, fe, nolinger, close_params);
	}

	return err;
}

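/*
 * Total order for the flow_owner tree: by pid first, then by the
 * opaque context pointer, then by the low-latency attribute.  This
 * is what allows a single process to hold multiple flow_owner
 * entries (see flow_owner_alloc() above).
 */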
static inline int
fo_cmp(const struct flow_owner *a, const struct flow_owner *b)
{
	if (a->fo_pid > b->fo_pid) {
		return 1;
	}
	if (a->fo_pid < b->fo_pid) {
		return -1;
	}
	if ((intptr_t)a->fo_context > (intptr_t)b->fo_context) {
		return 1;
	} else if ((intptr_t)a->fo_context < (intptr_t)b->fo_context) {
		return -1;
	}
	if (a->fo_low_latency != b->fo_low_latency) {
		if (a->fo_low_latency) {
			return 1;
		} else {
			return -1;
		}
	}
	return 0;
}

static struct flow_owner *
fo_alloc(boolean_t can_block)
{
	struct flow_owner *fo;

	fo = skmem_cache_alloc(sk_fo_cache,
	    can_block ? SKMEM_SLEEP : SKMEM_NOSLEEP);
	if (fo == NULL) {
		return NULL;
	}

	bzero(fo, sk_fo_size);

	SK_DF(SK_VERB_MEM, "fo 0x%llx ALLOC", SK_KVA(fo));

	return fo;
}

static void
fo_free(struct flow_owner *fo)
{
	ASSERT(fo->fo_bucket == NULL);
	ASSERT(RB_EMPTY(&fo->fo_flow_entry_id_head));
	ASSERT(fo->fo_flowadv_bmap == NULL);

	SK_DF(SK_VERB_MEM, "fo 0x%llx FREE", SK_KVA(fo));

	skmem_cache_free(sk_fo_cache, fo);
}