/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/flowswitch/fsw_var.h>
#include <skywalk/nexus/flowswitch/flow/flow_var.h>

static uint32_t flow_owner_bucket_purge_common(struct flow_owner_bucket *,
    nexus_port_t, boolean_t);
static int fo_cmp(const struct flow_owner *, const struct flow_owner *);
static struct flow_owner *fo_alloc(boolean_t);
static void fo_free(struct flow_owner *);

static LCK_GRP_DECLARE(flow_owner_lock_group, "sk_flow_owner_lock");
static LCK_ATTR_DECLARE(flow_owner_lock_attr, 0, 0);

RB_GENERATE_PREV(flow_owner_tree, flow_owner, fo_link, fo_cmp);

KALLOC_TYPE_VAR_DEFINE(KT_SK_FOB, struct flow_owner_bucket, KT_DEFAULT);

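/*
 * Allocate the flow owner bucket hash table as one contiguous region,
 * one cache-line-aligned bucket per slot. Sizing sketch (hypothetical
 * numbers): with a 128-byte cache line and a 96-byte bucket struct,
 * *fob_sz becomes P2ROUNDUP(96, 128) == 128, so fob_cnt == 512 buckets
 * occupy 512 * 128 bytes == 64 KiB in total.
 */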
struct flow_owner_bucket *
__sized_by(*tot_sz)
flow_owner_buckets_alloc(size_t fob_cnt, size_t *fob_sz, size_t *tot_sz)
{
        size_t cache_sz = skmem_cpu_cache_line_size();
        struct flow_owner_bucket *fob;
        size_t fob_tot_sz;

        /* each bucket is CPU cache-aligned */
        *fob_sz = P2ROUNDUP(sizeof(*fob), cache_sz);
        *tot_sz = fob_tot_sz = fob_cnt * (*fob_sz);
        fob = sk_alloc_type_hash(KT_SK_FOB, fob_tot_sz, Z_WAITOK,
            skmem_tag_fsw_fob_hash);
        if (__improbable(fob == NULL)) {
                return NULL;
        }

#if !KASAN_CLASSIC
        /*
         * Except in KASAN_CLASSIC mode, kalloc will always maintain
         * cacheline size alignment if the requested size is a multiple
         * of a cacheline size (this is true for any size that is a
         * power of two from 16 to PAGE_SIZE).
         *
         * Because this is an optimization only, it is OK that
         * KASAN_CLASSIC does not respect this.
         */
        ASSERT(IS_P2ALIGNED(fob, cache_sz));
#endif /* !KASAN_CLASSIC */

        SK_DF(SK_VERB_MEM, "fob %p fob_cnt %zu fob_sz %zu "
            "(total %zu bytes) ALLOC", SK_KVA(fob), fob_cnt,
            *fob_sz, fob_tot_sz);

        return fob;
}

void
flow_owner_buckets_free(struct flow_owner_bucket *fob, size_t tot_sz)
{
        SK_DF(SK_VERB_MEM, "fob %p FREE", SK_KVA(fob));
        sk_free_type_hash(KT_SK_FOB, tot_sz, fob);
}

void
flow_owner_bucket_init(struct flow_owner_bucket *fob)
{
#if !KASAN_CLASSIC
        ASSERT(IS_P2ALIGNED(fob, skmem_cpu_cache_line_size()));
#endif /* !KASAN_CLASSIC */
        lck_mtx_init(&fob->fob_lock, &flow_owner_lock_group,
            &flow_owner_lock_attr);
        RB_INIT(&fob->fob_owner_head);
}

void
flow_owner_bucket_destroy(struct flow_owner_bucket *fob)
{
        /*
         * In the event we are called as part of the nexus destructor,
         * we need to wait until all threads have exited the flow close
         * critical section and until the flow_owner_bucket is empty.
         * By the time we get here, the module initiating the request
         * (e.g. NECP) has been quiesced, so any flow open requests
         * would have been rejected.
         */
        FOB_LOCK(fob);
        while (!RB_EMPTY(&fob->fob_owner_head)) {
                SK_ERR("waiting for fob %p to go idle", SK_KVA(fob));
                if (++(fob->fob_dtor_waiters) == 0) {   /* wraparound */
                        fob->fob_dtor_waiters++;
                }
                (void) msleep(&fob->fob_dtor_waiters, &fob->fob_lock,
                    (PZERO - 1), __FUNCTION__, NULL);
        }
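        /*
         * The sleep above is paired with flow_owner_free(), which
         * zeroes fob_dtor_waiters and issues a wakeup() on its address
         * once the last flow_owner leaves fob_owner_head; bumping the
         * counter past zero on wraparound keeps it non-zero while
         * waiters exist, so a pending wakeup is never missed.
         */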
        while (fob->fob_busy_flags & FOBF_CLOSE_BUSY) {
                if (++(fob->fob_close_waiters) == 0) {  /* wraparound */
                        fob->fob_close_waiters++;
                }
                (void) msleep(&fob->fob_close_waiters, &fob->fob_lock,
                    (PZERO - 1), __FUNCTION__, NULL);
        }
        ASSERT(RB_EMPTY(&fob->fob_owner_head));
        ASSERT(!(fob->fob_busy_flags & FOBF_OPEN_BUSY));
        ASSERT(!(fob->fob_busy_flags & FOBF_CLOSE_BUSY));
        FOB_UNLOCK(fob);
        lck_mtx_destroy(&fob->fob_lock, &flow_owner_lock_group);
}

static uint32_t
flow_owner_bucket_purge_common(struct flow_owner_bucket *fob,
    nexus_port_t nx_port, boolean_t if_idle)
{
        /* called by flow_owner_bucket_purge_all()? */
        boolean_t locked = (nx_port == NEXUS_PORT_ANY);
        struct flow_owner *fo, *tfo;
        struct flow_entry *fe, *tfe;
        uint32_t cnt = 0;

        if (!locked) {
                FOB_LOCK(fob);
        }
        FOB_LOCK_ASSERT_HELD(fob);

        RB_FOREACH_SAFE(fo, flow_owner_tree, &fob->fob_owner_head, tfo) {
                if (fo->fo_nx_port != nx_port && nx_port != NEXUS_PORT_ANY) {
                        continue;
                }

                if (!if_idle || nx_port == NEXUS_PORT_ANY) {
                        RB_FOREACH_SAFE(fe, flow_entry_id_tree,
                            &fo->fo_flow_entry_id_head, tfe) {
                                ASSERT(fe->fe_nx_port == fo->fo_nx_port);
                                flow_entry_retain(fe);
                                flow_entry_destroy(fo, fe, FALSE, NULL);
                        }
                }

                ASSERT(nx_port != NEXUS_PORT_ANY ||
                    RB_EMPTY(&fo->fo_flow_entry_id_head));

                if (RB_EMPTY(&fo->fo_flow_entry_id_head)) {
                        flow_owner_free(fob, fo);
                        ++cnt;
                } else if (nx_port != NEXUS_PORT_ANY) {
                        /* let ms_flow_unbind() know this port is gone */
                        fo->fo_nx_port_destroyed = TRUE;
                        VERIFY(fo->fo_nx_port_na == NULL);
                }
        }

        if (!locked) {
                FOB_UNLOCK(fob);
        }

        return cnt;
}

void
flow_owner_bucket_purge_all(struct flow_owner_bucket *fob)
{
        (void) flow_owner_bucket_purge_common(fob, NEXUS_PORT_ANY, TRUE);
}

static uint32_t
flow_owner_bucket_activate_nx_port_common(struct flow_owner_bucket *fob,
    nexus_port_t nx_port, struct nexus_adapter *nx_port_na,
    na_activate_mode_t mode)
{
        struct flow_owner *fo;
        struct flow_entry *fe;
        uint32_t cnt = 0;

        VERIFY(nx_port != NEXUS_PORT_ANY);
        FOB_LOCK(fob);

        RB_FOREACH(fo, flow_owner_tree, &fob->fob_owner_head) {
                if (fo->fo_nx_port_destroyed || (fo->fo_nx_port != nx_port)) {
                        continue;
                }

                if (mode == NA_ACTIVATE_MODE_ON) {
                        VERIFY(fo->fo_nx_port_na == NULL);
                        *(struct nexus_adapter **)(uintptr_t)&fo->fo_nx_port_na =
                            nx_port_na;
                }
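                /*
                 * Note: the cast through uintptr_t above (and again
                 * below) is the same "const override" idiom used in
                 * flow_owner_alloc(), writing a field that is
                 * otherwise treated as immutable for the lifetime of
                 * the flow_owner.
                 */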

                RB_FOREACH(fe, flow_entry_id_tree,
                    &fo->fo_flow_entry_id_head) {
                        if (fe->fe_flags & FLOWENTF_TORN_DOWN) {
                                continue;
                        }
                        VERIFY(fe->fe_nx_port == fo->fo_nx_port);
                        if (fe->fe_adv_idx != FLOWADV_IDX_NONE) {
                                if (mode == NA_ACTIVATE_MODE_ON) {
                                        na_flowadv_entry_alloc(
                                            fo->fo_nx_port_na, fe->fe_uuid,
                                            fe->fe_adv_idx, fe->fe_flowid);
                                } else if (fo->fo_nx_port_na != NULL) {
                                        na_flowadv_entry_free(fo->fo_nx_port_na,
                                            fe->fe_uuid, fe->fe_adv_idx,
                                            fe->fe_flowid);
                                }
                        }
                }

                if (mode != NA_ACTIVATE_MODE_ON && fo->fo_nx_port_na != NULL) {
                        *(struct nexus_adapter **)(uintptr_t)&fo->fo_nx_port_na =
                            NULL;
                }

                ++cnt;
        }

        FOB_UNLOCK(fob);
        return cnt;
}

uint32_t
flow_owner_activate_nexus_port(struct flow_mgr *fm,
    boolean_t pid_bound, pid_t pid, nexus_port_t nx_port,
    struct nexus_adapter *nx_port_na, na_activate_mode_t mode)
{
        struct flow_owner_bucket *fob;
        uint32_t fo_cnt = 0;

        VERIFY(nx_port != NEXUS_PORT_ANY);
        VERIFY(nx_port_na != NULL);

        if (pid_bound) {
                fob = flow_mgr_get_fob_by_pid(fm, pid);
                fo_cnt = flow_owner_bucket_activate_nx_port_common(fob, nx_port,
                    nx_port_na, mode);
        } else {
                uint32_t i;
                /*
                 * Otherwise, this can get expensive since we need to search
                 * thru all proc-mapping buckets to find the flows that are
                 * related to this nexus port.
                 */
                for (i = 0; i < fm->fm_owner_buckets_cnt; i++) {
                        fob = flow_mgr_get_fob_at_idx(fm, i);
                        fo_cnt += flow_owner_bucket_activate_nx_port_common(fob,
                            nx_port, nx_port_na, mode);
                }
        }
        /* there shouldn't be more than one flow owner on a nexus port */
        VERIFY(fo_cnt <= 1);
        return fo_cnt;
}

static void
flow_owner_bucket_attach_common(struct flow_owner_bucket *fob,
    nexus_port_t nx_port)
{
        struct flow_owner *fo;

        VERIFY(nx_port != NEXUS_PORT_ANY);
        FOB_LOCK(fob);

        RB_FOREACH(fo, flow_owner_tree, &fob->fob_owner_head) {
                if (fo->fo_nx_port_destroyed && (fo->fo_nx_port == nx_port)) {
                        fo->fo_nx_port_destroyed = FALSE;
                }
        }

        FOB_UNLOCK(fob);
}

void
flow_owner_attach_nexus_port(struct flow_mgr *fm, boolean_t pid_bound,
    pid_t pid, nexus_port_t nx_port)
{
        struct flow_owner_bucket *fob;
        ASSERT(nx_port != NEXUS_PORT_ANY);

        if (pid_bound) {
                fob = flow_mgr_get_fob_by_pid(fm, pid);
                flow_owner_bucket_attach_common(fob, nx_port);
        } else {
                uint32_t i;
                /*
                 * Otherwise, this can get expensive since we need to search
                 * thru all proc-mapping buckets to find the flows that are
                 * related to this nexus port.
                 */
                for (i = 0; i < fm->fm_owner_buckets_cnt; i++) {
                        fob = flow_mgr_get_fob_at_idx(fm, i);
                        flow_owner_bucket_attach_common(fob, nx_port);
                }
        }
}

uint32_t
flow_owner_detach_nexus_port(struct flow_mgr *fm, boolean_t pid_bound,
    pid_t pid, nexus_port_t nx_port, boolean_t if_idle)
{
        struct flow_owner_bucket *fob;
        uint32_t purged = 0;
        ASSERT(nx_port != NEXUS_PORT_ANY);

        if (pid_bound) {
                fob = flow_mgr_get_fob_by_pid(fm, pid);
                purged = flow_owner_bucket_purge_common(fob, nx_port, if_idle);
        } else {
                uint32_t i;
                /*
                 * Otherwise, this can get expensive since we need to search
                 * thru all proc-mapping buckets to find the flows that are
                 * related to this nexus port.
                 */
                for (i = 0; i < fm->fm_owner_buckets_cnt; i++) {
                        fob = flow_mgr_get_fob_at_idx(fm, i);
                        purged += flow_owner_bucket_purge_common(fob,
                            nx_port, if_idle);
                }
        }
        return purged;
}

/* 64-bit mask with range */
#define FO_BMASK64(_beg, _end)                                          \
        ((((uint64_t)0xffffffffffffffff) >>                             \
        (63 - (_end))) & ~((1ULL << (_beg)) - 1))
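/*
 * Example (illustrative values):
 *
 *      FO_BMASK64(0, 5) == 0x3f   (bits 0..5 set)
 *      FO_BMASK64(4, 7) == 0xf0   (bits 4..7 set)
 *
 * The right shift keeps bits 0.._end; the AND clears bits below _beg.
 */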

struct flow_owner *
flow_owner_alloc(struct flow_owner_bucket *fob, struct proc *p,
    nexus_port_t nx_port, bool nx_port_pid_bound, bool flowadv,
    struct nx_flowswitch *fsw, struct nexus_adapter *nx_port_na,
    void *context, bool low_latency)
{
        struct flow_owner *fo;
        const pid_t pid = proc_pid(p);

        static_assert(true == 1);
        static_assert(false == 0);
        ASSERT(low_latency == true || low_latency == false);
        ASSERT(nx_port != NEXUS_PORT_ANY);
        FOB_LOCK_ASSERT_HELD(fob);

#if DEBUG
        ASSERT(flow_owner_find_by_pid(fob, pid, context, low_latency) == NULL);
        RB_FOREACH(fo, flow_owner_tree, &fob->fob_owner_head) {
                if (!fo->fo_nx_port_destroyed && (fo->fo_nx_port == nx_port)) {
                        VERIFY(0);
                        /* NOTREACHED */
                        __builtin_unreachable();
                }
        }
#endif /* DEBUG */

        fo = fo_alloc(TRUE);
        if (fo != NULL) {
                if (flowadv) {
                        uint32_t i;
                        bitmap_t *bmap;

                        bmap = skmem_cache_alloc(sk_fab_cache, SKMEM_SLEEP);
                        if (bmap == NULL) {
                                SK_ERR("failed to alloc flow advisory bitmap");
                                fo_free(fo);
                                return NULL;
                        }
                        bzero(bmap, sk_fab_size);
                        fo->fo_flowadv_bmap = bmap;
                        fo->fo_num_flowadv_bmaps = sk_fadv_nchunks;
                        fo->fo_flowadv_max = sk_max_flows;

                        /* set the bits for free indices */
                        for (i = 0; i < sk_fadv_nchunks; i++) {
                                uint32_t end = 63;

                                if (i == (sk_fadv_nchunks - 1)) {
                                        end = ((sk_max_flows - 1) %
                                            FO_FLOWADV_CHUNK);
                                }

                                fo->fo_flowadv_bmap[i] = FO_BMASK64(0, end);
                        }
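                        /*
                         * Illustrative sketch (hypothetical values):
                         * assuming FO_FLOWADV_CHUNK == 64 and
                         * sk_max_flows == 100 (so sk_fadv_nchunks == 2),
                         * the loop above sets bmap[0] = FO_BMASK64(0, 63)
                         * (all 64 indices free) and bmap[1] =
                         * FO_BMASK64(0, 35), marking the remaining 36
                         * indices (64..99) free.
                         */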
                }
                RB_INIT(&fo->fo_flow_entry_id_head);
                /* const override */
                *(struct flow_owner_bucket **)(uintptr_t)&fo->fo_bucket = fob;
                fo->fo_context = context;
                fo->fo_pid = pid;
                (void) snprintf(fo->fo_name, sizeof(fo->fo_name), "%s",
                    proc_name_address(p));
                fo->fo_nx_port_pid_bound = nx_port_pid_bound;
                fo->fo_low_latency = low_latency;
                fo->fo_nx_port = nx_port;
                *(struct nexus_adapter **)(uintptr_t)&fo->fo_nx_port_na =
                    nx_port_na;
                *(struct nx_flowswitch **)(uintptr_t)&fo->fo_fsw = fsw;
                RB_INSERT(flow_owner_tree, &fob->fob_owner_head, fo);

                SK_DF(SK_VERB_FLOW, "%s(%d) fob %p added fo %p "
                    "nx_port %d nx_port_pid_bound %d ll %d nx_port_na %p",
                    fo->fo_name, fo->fo_pid, SK_KVA(fob), SK_KVA(fo),
                    (int)nx_port, nx_port_pid_bound, fo->fo_low_latency,
                    SK_KVA(nx_port_na));
        }

        return fo;
}

void
flow_owner_free(struct flow_owner_bucket *fob, struct flow_owner *fo)
{
        FOB_LOCK_ASSERT_HELD(fob);

        ASSERT(fo->fo_bucket == fob);
        *(struct flow_owner_bucket **)(uintptr_t)&fo->fo_bucket = NULL;
        RB_REMOVE(flow_owner_tree, &fob->fob_owner_head, fo);

        ASSERT(fo->fo_num_flowadv == 0);
        if (fo->fo_flowadv_bmap != NULL) {
                skmem_cache_free(sk_fab_cache, fo->fo_flowadv_bmap);
                fo->fo_flowadv_bmap = NULL;
                fo->fo_num_flowadv_bmaps = 0;
        }

        /* wake up any thread blocked in flow_owner_bucket_destroy() */
        if (RB_EMPTY(&fob->fob_owner_head) && fob->fob_dtor_waiters > 0) {
                fob->fob_dtor_waiters = 0;
                wakeup(&fob->fob_dtor_waiters);
        }

        SK_DF(SK_VERB_FLOW, "%s(%d) fob %p removed fo %p nx_port %d",
            fo->fo_name, fo->fo_pid, SK_KVA(fob), SK_KVA(fo),
            (int)fo->fo_nx_port);

        fo_free(fo);
}

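/*
 * Claim the first free flow advisory index. Worked example
 * (hypothetical state): if bmap[0] == ~1ULL (only index 0 busy),
 * ffsll() returns 2 (it is 1-based), so j becomes 1, bit 1 is cleared
 * to mark the slot busy, and *fadv_idx is set to 1.
 */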
int
flow_owner_flowadv_index_alloc(struct flow_owner *fo, flowadv_idx_t *fadv_idx)
{
        bitmap_t *bmap = fo->fo_flowadv_bmap;
        size_t nchunks, i, j, idx = FLOWADV_IDX_NONE;

        FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));
        ASSERT(fo->fo_flowadv_max != 0);

        nchunks = P2ROUNDUP(fo->fo_flowadv_max, FO_FLOWADV_CHUNK) /
            FO_FLOWADV_CHUNK;

        for (i = 0; i < nchunks; i++) {
                j = ffsll(bmap[i]);
                if (j == 0) {
                        /* all indices in this chunk are in use */
                        continue;
                }
                --j;
                /* mark the index as in use */
                bit_clear(bmap[i], j);
                idx = (i * FO_FLOWADV_CHUNK) + j;
                break;
        }

        if (idx == FLOWADV_IDX_NONE) {
                SK_ERR("%s(%d) flow advisory table full: num %u max %u",
                    fo->fo_name, fo->fo_pid, fo->fo_num_flowadv,
                    fo->fo_flowadv_max);
                VERIFY(fo->fo_num_flowadv == fo->fo_flowadv_max);
                *fadv_idx = FLOWADV_IDX_NONE;
                return ENOMEM;
        }

        fo->fo_num_flowadv++;
        ASSERT(idx < ((flowadv_idx_t)-1));
        *fadv_idx = (flowadv_idx_t)idx;
        ASSERT(*fadv_idx < fo->fo_flowadv_max);
        return 0;
}

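/*
 * Release a previously allocated advisory index. Example (assuming
 * FO_FLOWADV_CHUNK == 64): freeing fadv_idx 70 sets bit 6 of chunk 1
 * (70 / 64 == 1, 70 % 64 == 6), making that index available to a
 * subsequent flow_owner_flowadv_index_alloc().
 */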
void
flow_owner_flowadv_index_free(struct flow_owner *fo, flowadv_idx_t fadv_idx)
{
        uint32_t chunk_idx, bit_pos;
        bitmap_t *bmap = fo->fo_flowadv_bmap;

        FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));
        ASSERT(fo->fo_num_flowadv != 0);
        ASSERT((fo->fo_flowadv_max != 0) && (fadv_idx < fo->fo_flowadv_max));

        chunk_idx = fadv_idx / FO_FLOWADV_CHUNK;
        bit_pos = fadv_idx % FO_FLOWADV_CHUNK;
        ASSERT(!bit_test(bmap[chunk_idx], bit_pos));
        /* mark the index as free */
        bit_set(bmap[chunk_idx], bit_pos);
        fo->fo_num_flowadv--;
}

int
flow_owner_destroy_entry(struct flow_owner *fo, uuid_t uuid,
    bool nolinger, void *close_params)
{
        struct flow_entry *fe = NULL;
        int err = 0;

        FOB_LOCK_ASSERT_HELD(FO_BUCKET(fo));

        /* look up such a flow for this process */
        fe = flow_entry_find_by_uuid(fo, uuid);
        if (fe == NULL) {
                err = ENOENT;
        } else {
                /* free flow entry (OK to linger if caller asked) */
                flow_entry_destroy(fo, fe, nolinger, close_params);
        }

        return err;
}

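/*
 * Total order for flow_owner_tree: owners are keyed by (pid, context,
 * low_latency), which matches the lookup key used by
 * flow_owner_find_by_pid().
 */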
static inline int
fo_cmp(const struct flow_owner *a, const struct flow_owner *b)
{
        if (a->fo_pid > b->fo_pid) {
                return 1;
        }
        if (a->fo_pid < b->fo_pid) {
                return -1;
        }
        if ((intptr_t)a->fo_context > (intptr_t)b->fo_context) {
                return 1;
        } else if ((intptr_t)a->fo_context < (intptr_t)b->fo_context) {
                return -1;
        }
        if (a->fo_low_latency != b->fo_low_latency) {
                if (a->fo_low_latency) {
                        return 1;
                } else {
                        return -1;
                }
        }
        return 0;
}

static struct flow_owner *
fo_alloc(boolean_t can_block)
{
        struct flow_owner *fo;

        fo = skmem_cache_alloc(sk_fo_cache,
            can_block ? SKMEM_SLEEP : SKMEM_NOSLEEP);
        if (fo == NULL) {
                return NULL;
        }

        bzero(fo, sk_fo_size);

        SK_DF(SK_VERB_MEM, "fo %p ALLOC", SK_KVA(fo));

        return fo;
}

static void
fo_free(struct flow_owner *fo)
{
        ASSERT(fo->fo_bucket == NULL);
        ASSERT(RB_EMPTY(&fo->fo_flow_entry_id_head));
        ASSERT(fo->fo_flowadv_bmap == NULL);

        SK_DF(SK_VERB_MEM, "fo %p FREE", SK_KVA(fo));

        skmem_cache_free(sk_fo_cache, fo);
}