1 /*
2 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #include <skywalk/nexus/upipe/nx_user_pipe.h>
31 #include <skywalk/nexus/kpipe/nx_kernel_pipe.h>
32 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
33 #include <skywalk/nexus/netif/nx_netif.h>
34 #include <skywalk/nexus/monitor/nx_monitor.h>
35
/* registry of all attached nexus domains; protected by SK_LOCK */
static STAILQ_HEAD(, nxdom) nexus_domains =
    STAILQ_HEAD_INITIALIZER(nexus_domains);

/* domain attach/detach and init/terminate/fini lifecycle helpers */
static void nxdom_attach(struct nxdom *);
static void nxdom_detach(struct nxdom *);
static void nxdom_init(struct nxdom *);
static void nxdom_terminate(struct nxdom *);
static void nxdom_fini(struct nxdom *);
static void nxdom_del_provider_final(struct kern_nexus_domain_provider *);

/* init/fini shims installed on externally-registered domain providers */
static int nxdom_prov_ext_init(struct kern_nexus_domain_provider *);
static void nxdom_prov_ext_fini(struct kern_nexus_domain_provider *);
static struct kern_nexus_domain_provider *nxdom_prov_alloc(zalloc_flags_t);
static void nxdom_prov_free(struct kern_nexus_domain_provider *);

static uint32_t nxprov_bound_var(uint32_t *, uint32_t, uint32_t, uint32_t,
    const char *);
/* deferred domain-provider teardown, handled by the detacher thread */
static void nxprov_detaching_enqueue(struct kern_nexus_domain_provider *);
static struct kern_nexus_domain_provider *nxprov_detaching_dequeue(void);
static void nxprov_detacher(void *, wait_result_t);
static int nxprov_detacher_cont(int);

static struct nexus_controller *ncd_alloc(zalloc_flags_t);
static void ncd_free(struct nexus_controller *);

static struct nexus_attr *nxa_alloc(zalloc_flags_t);
static void nxa_free(struct nexus_attr *);

static int _kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid,
    struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
    uuid_t *nx_if_uuid);

/* zones backing nexus controller, domain provider and attribute objects */
static SKMEM_TYPE_DEFINE(ncd_zone, struct nexus_controller);

static SKMEM_TYPE_DEFINE(nxdom_prov_zone, struct kern_nexus_domain_provider);

static SKMEM_TYPE_DEFINE(nxa_zone, struct nexus_attr);

static int __nxdom_inited = 0;  /* set once by nxdom_attach_all() */
/* providers awaiting final release by the detacher thread */
static STAILQ_HEAD(, kern_nexus_domain_provider) nxprov_detaching_head =
    STAILQ_HEAD_INITIALIZER(nxprov_detaching_head);
static uint32_t nxprov_detaching_cnt;   /* length of detaching list */
static void *nxprov_detach_wchan;       /* wait channel for detacher */

/*
 * Array of default nexus domain providers.  Initialized once during
 * domain attach time; no lock is needed to read as they can be treated
 * as immutables, since default providers imply built-in ones and they
 * never detach in practice.
 */
struct kern_nexus_domain_provider *nxdom_prov_default[NEXUS_TYPE_MAX];
87
/*
 * Attach every compiled-in nexus domain, ask each to initialize, and
 * spawn the domain-provider detacher thread.  Called exactly once at
 * Skywalk startup with SK_LOCK held.
 */
void
nxdom_attach_all(void)
{
	struct nxdom *nxdom;
	thread_t __single tp = THREAD_NULL;

	SK_LOCK_ASSERT_HELD();
	ASSERT(!__nxdom_inited);
	ASSERT(STAILQ_EMPTY(&nexus_domains));

#if CONFIG_NEXUS_FLOWSWITCH
	nxdom_attach(&nx_flowswitch_dom_s);
#endif /* CONFIG_NEXUS_FLOWSWITCH */
#if CONFIG_NEXUS_USER_PIPE
	nxdom_attach(&nx_upipe_dom_s);
#endif /* CONFIG_NEXUS_USER_PIPE */
#if CONFIG_NEXUS_KERNEL_PIPE
	nxdom_attach(&nx_kpipe_dom_s);
#endif /* CONFIG_NEXUS_KERNEL_PIPE */
#if CONFIG_NEXUS_NETIF
	nxdom_attach(&nx_netif_dom_s);
#endif /* CONFIG_NEXUS_NETIF */
#if CONFIG_NEXUS_MONITOR
	nxdom_attach(&nx_monitor_dom_s);
#endif /* CONFIG_NEXUS_MONITOR */

	/* ask domains to initialize */
	STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link)
	nxdom_init(nxdom);

	if (kernel_thread_start(nxprov_detacher, NULL, &tp) != KERN_SUCCESS) {
		panic_plain("%s: couldn't create detacher thread", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}
	/* drop the extra reference taken by kernel_thread_start() */
	thread_deallocate(tp);

	__nxdom_inited = 1;
}
127
128 void
nxdom_detach_all(void)129 nxdom_detach_all(void)
130 {
131 struct nxdom *nxdom, *tnxdom;
132
133 SK_LOCK_ASSERT_HELD();
134
135 if (__nxdom_inited) {
136 STAILQ_FOREACH_SAFE(nxdom, &nexus_domains, nxdom_link, tnxdom) {
137 nxdom_terminate(nxdom);
138 nxdom_fini(nxdom);
139 nxdom_detach(nxdom);
140 }
141
142 /*
143 * TODO: [email protected] -- terminate detacher thread.
144 */
145
146 __nxdom_inited = 0;
147 }
148 ASSERT(STAILQ_EMPTY(&nexus_domains));
149 }
150
/*
 * Sanity-check one min/default/max parameter triplet of a domain:
 * min <= default <= max must hold.
 */
#define ASSERT_NXDOM_PARAMS(_dom, _var) do {                            \
	ASSERT(NXDOM_MIN(_dom, _var) <= NXDOM_MAX(_dom, _var));         \
	ASSERT(NXDOM_DEF(_dom, _var) >= NXDOM_MIN(_dom, _var));         \
	ASSERT(NXDOM_DEF(_dom, _var) <= NXDOM_MAX(_dom, _var));         \
} while (0)
156
/*
 * Validate a nexus domain descriptor and insert it into the global
 * nexus_domains list.  The domain type must be unique and all of its
 * type/metadata fields, parameter triplets and mandatory callbacks
 * must be sane; any violation is a programming error and panics.
 * Requires SK_LOCK.
 */
static void
nxdom_attach(struct nxdom *nxdom)
{
	struct nxdom *nxdom1;

	SK_LOCK_ASSERT_HELD();
	ASSERT(!(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED));

	STAILQ_FOREACH(nxdom1, &nexus_domains, nxdom_link) {
		if (nxdom1->nxdom_type == nxdom->nxdom_type) {
			/* type must be unique; this is a programming error */
			VERIFY(0);
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}

	/* verify this is a valid type */
	switch (nxdom->nxdom_type) {
	case NEXUS_TYPE_USER_PIPE:
	case NEXUS_TYPE_KERNEL_PIPE:
	case NEXUS_TYPE_NET_IF:
	case NEXUS_TYPE_FLOW_SWITCH:
	case NEXUS_TYPE_MONITOR:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/* verify this is a valid metadata type */
	switch (nxdom->nxdom_md_type) {
	case NEXUS_META_TYPE_QUANTUM:
	case NEXUS_META_TYPE_PACKET:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/* verify this is a valid metadata subtype */
	switch (nxdom->nxdom_md_subtype) {
	case NEXUS_META_SUBTYPE_PAYLOAD:
	case NEXUS_META_SUBTYPE_RAW:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

#if (DEVELOPMENT || DEBUG)
	/*
	 * Override the default ring sizes for flowswitch if configured
	 * via boot-args.  Each nexus provider instance can still change
	 * the values if so desired.  The boot-arg is clamped to the
	 * domain's [min, max] range before being installed as default.
	 */
	if (nxdom->nxdom_type == NEXUS_TYPE_FLOW_SWITCH) {
		if (sk_txring_sz != 0) {
			if (sk_txring_sz < NXDOM_MIN(nxdom, tx_slots)) {
				sk_txring_sz = NXDOM_MIN(nxdom, tx_slots);
			} else if (sk_txring_sz > NXDOM_MAX(nxdom, tx_slots)) {
				sk_txring_sz = NXDOM_MAX(nxdom, tx_slots);
			}
			NXDOM_DEF(nxdom, tx_slots) = sk_txring_sz;
		}
		if (sk_rxring_sz != 0) {
			if (sk_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) {
				sk_rxring_sz = NXDOM_MIN(nxdom, rx_slots);
			} else if (sk_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) {
				sk_rxring_sz = NXDOM_MAX(nxdom, rx_slots);
			}
			NXDOM_DEF(nxdom, rx_slots) = sk_rxring_sz;
		}
	}
	/*
	 * Override the default ring sizes for netif if configured
	 * via boot-args.  Each nexus provider instance can still change
	 * the values if so desired.
	 */
	if (nxdom->nxdom_type == NEXUS_TYPE_NET_IF) {
		if (sk_net_txring_sz != 0) {
			if (sk_net_txring_sz < NXDOM_MIN(nxdom, tx_slots)) {
				sk_net_txring_sz = NXDOM_MIN(nxdom, tx_slots);
			} else if (sk_net_txring_sz > NXDOM_MAX(nxdom, tx_slots)) {
				sk_net_txring_sz = NXDOM_MAX(nxdom, tx_slots);
			}
			NXDOM_DEF(nxdom, tx_slots) = sk_net_txring_sz;
		}
		if (sk_net_rxring_sz != 0) {
			if (sk_net_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) {
				sk_net_rxring_sz = NXDOM_MIN(nxdom, rx_slots);
			} else if (sk_net_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) {
				sk_net_rxring_sz = NXDOM_MAX(nxdom, rx_slots);
			}
			NXDOM_DEF(nxdom, rx_slots) = sk_net_rxring_sz;
		}
	}

#endif /* DEVELOPMENT || DEBUG */

	/* verify that parameters are sane */
	ASSERT(NXDOM_MAX(nxdom, ports) > 0);
	ASSERT(NXDOM_MAX(nxdom, ports) <= NEXUS_PORT_MAX);
	ASSERT_NXDOM_PARAMS(nxdom, ports);
	ASSERT_NXDOM_PARAMS(nxdom, tx_rings);
	ASSERT_NXDOM_PARAMS(nxdom, rx_rings);
	ASSERT(NXDOM_MAX(nxdom, tx_slots) > 0);
	ASSERT_NXDOM_PARAMS(nxdom, tx_slots);
	ASSERT(NXDOM_MAX(nxdom, rx_slots) > 0);
	ASSERT_NXDOM_PARAMS(nxdom, rx_slots);
	ASSERT_NXDOM_PARAMS(nxdom, buf_size);
	ASSERT_NXDOM_PARAMS(nxdom, meta_size);
	ASSERT_NXDOM_PARAMS(nxdom, pipes);
	ASSERT_NXDOM_PARAMS(nxdom, extensions);

	/* these must exist */
	ASSERT(nxdom->nxdom_bind_port != NULL);
	ASSERT(nxdom->nxdom_unbind_port != NULL);
	ASSERT(nxdom->nxdom_connect != NULL);
	ASSERT(nxdom->nxdom_disconnect != NULL);
	ASSERT(nxdom->nxdom_defunct != NULL);
	ASSERT(nxdom->nxdom_defunct_finalize != NULL);

	STAILQ_INSERT_TAIL(&nexus_domains, nxdom, nxdom_link);
	nxdom->nxdom_flags |= NEXUSDOMF_ATTACHED;
}
289
290 #undef VERIFY_NXDOM_PARAMS
291
292 static void
nxdom_detach(struct nxdom * nxdom)293 nxdom_detach(struct nxdom *nxdom)
294 {
295 SK_LOCK_ASSERT_HELD();
296 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
297
298 STAILQ_REMOVE(&nexus_domains, nxdom, nxdom, nxdom_link);
299 nxdom->nxdom_flags &= ~NEXUSDOMF_ATTACHED;
300 }
301
302 static void
nxdom_init(struct nxdom * nxdom)303 nxdom_init(struct nxdom *nxdom)
304 {
305 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
306
307 SK_LOCK_ASSERT_HELD();
308
309 if (!(nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED)) {
310 if (nxdom->nxdom_init != NULL) {
311 nxdom->nxdom_init(nxdom);
312 }
313 nxdom->nxdom_flags |= NEXUSDOMF_INITIALIZED;
314 }
315 }
316
317 static void
nxdom_terminate(struct nxdom * nxdom)318 nxdom_terminate(struct nxdom *nxdom)
319 {
320 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
321
322 SK_LOCK_ASSERT_HELD();
323
324 if ((nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) &&
325 !(nxdom->nxdom_flags & NEXUSDOMF_TERMINATED)) {
326 if (nxdom->nxdom_terminate != NULL) {
327 nxdom->nxdom_terminate(nxdom);
328 }
329 nxdom->nxdom_flags |= NEXUSDOMF_TERMINATED;
330 }
331 }
332
333 static void
nxdom_fini(struct nxdom * nxdom)334 nxdom_fini(struct nxdom *nxdom)
335 {
336 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
337
338 if (nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) {
339 if (nxdom->nxdom_fini != NULL) {
340 nxdom->nxdom_fini(nxdom);
341 }
342 nxdom->nxdom_flags &= ~NEXUSDOMF_INITIALIZED;
343 }
344 }
345
/*
 * Register a domain provider with a domain: assigns it a fresh UUID,
 * runs its init callback, links it onto the domain's provider list
 * (taking a reference for the list), and, if it is the default
 * provider, installs it into nxdom_prov_default[] (taking another
 * reference).  Returns EEXIST on a name collision, or the error from
 * the provider's init callback.  Requires SK_LOCK.
 */
int
nxdom_prov_add(struct nxdom *nxdom,
    struct kern_nexus_domain_provider *nxdom_prov)
{
	struct kern_nexus_domain_provider *nxprov1;
	nexus_type_t type = nxdom->nxdom_type;
	boolean_t builtin;
	int err = 0;

	SK_LOCK_ASSERT_HELD();
	ASSERT(type < NEXUS_TYPE_MAX);

	builtin = !(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT);

	STAILQ_FOREACH(nxprov1, &nxdom->nxdom_prov_head, nxdom_prov_link) {
		/*
		 * We can be a little more strict in the kernel and
		 * avoid namespace collision (even though each domain
		 * provider has a UUID); this also guarantees that
		 * external providers won't conflict with the builtin
		 * ones.
		 */
		if (strbufcmp(nxprov1->nxdom_prov_name, sizeof(nxprov1->nxdom_prov_name),
		    nxdom_prov->nxdom_prov_name, sizeof(nxdom_prov->nxdom_prov_name)) == 0) {
			return EEXIST;
		}
	}

	VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED));
	VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED));

	uuid_generate_random(nxdom_prov->nxdom_prov_uuid);
	nxdom_prov->nxdom_prov_dom = nxdom;
	if (nxdom_prov->nxdom_prov_init != NULL) {
		err = nxdom_prov->nxdom_prov_init(nxdom_prov);
	}

	if (err == 0) {
		nxdom_prov->nxdom_prov_flags |=
		    (NXDOMPROVF_ATTACHED | NXDOMPROVF_INITIALIZED);
		STAILQ_INSERT_TAIL(&nxdom->nxdom_prov_head, nxdom_prov,
		    nxdom_prov_link);
		/* for being in the list */
		nxdom_prov_retain_locked(nxdom_prov);

		if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) {
			/* only built-in providers may be the default */
			VERIFY(builtin && nxdom_prov_default[type] == NULL);
			nxdom_prov_default[type] = nxdom_prov;
			/* for being in the array */
			nxdom_prov_retain_locked(nxdom_prov);
		}

		SK_D("nxdom_prov 0x%llx (%s) dom %s",
		    SK_KVA(nxdom_prov), nxdom_prov->nxdom_prov_name,
		    nxdom->nxdom_name);
	} else {
		/* init failed; undo the UUID/domain assignment */
		uuid_clear(nxdom_prov->nxdom_prov_uuid);
		nxdom_prov->nxdom_prov_dom = NULL;
	}

	return err;
}
407
/*
 * Begin detaching a domain provider: unlink it from its domain's
 * provider list (the list reference is kept and handed to the
 * detaching list), drop the default-array reference if applicable,
 * and hand the provider to the detacher thread for final teardown.
 * Idempotent while a detach is already pending.  Requires SK_LOCK.
 */
void
nxdom_prov_del(struct kern_nexus_domain_provider *nxdom_prov)
{
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;
	nexus_type_t type = nxdom->nxdom_type;

	SK_LOCK_ASSERT_HELD();
	ASSERT(type < NEXUS_TYPE_MAX);
	ASSERT(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED);

	/* detach already in progress; nothing to do */
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DETACHING) {
		return;
	}

	SK_D("nxdom_prov 0x%llx (%s:%s)", SK_KVA(nxdom_prov), nxdom->nxdom_name,
	    nxdom_prov->nxdom_prov_name);

	/* keep the reference around for the detaching list (see below) */
	STAILQ_REMOVE(&nxdom->nxdom_prov_head, nxdom_prov,
	    kern_nexus_domain_provider, nxdom_prov_link);
	nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_ATTACHED;
	nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_DETACHING;

	/* there can only be one default and it must match this one */
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) {
		ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT));
		VERIFY(nxdom_prov_default[type] == nxdom_prov);
		nxdom_prov_default[type] = NULL;
		/*
		 * Release reference held for the array; this must
		 * not be the last reference, as there is still at
		 * least one which we kept for the detaching list.
		 */
		VERIFY(!nxdom_prov_release_locked(nxdom_prov));
	}

	/* add to detaching list and wake up detacher */
	nxprov_detaching_enqueue(nxdom_prov);
}
447
/*
 * Final stage of domain-provider detach, invoked from the detacher
 * thread after the provider has been dequeued from the detaching
 * list.  Clears the detaching flag and releases the detaching-list
 * reference, which may be the last one.  Requires SK_LOCK.
 */
static void
nxdom_del_provider_final(struct kern_nexus_domain_provider *nxdom_prov)
{
#if (DEBUG || DEVELOPMENT)
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;
#endif /* DEBUG || DEVELOPMENT */

	SK_LOCK_ASSERT_HELD();

	/* must be detaching and no longer attached */
	ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
	    NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
	ASSERT(nxdom != NULL);

	SK_D("nxdom_prov 0x%llx (%s:%s)", SK_KVA(nxdom_prov), nxdom->nxdom_name,
	    nxdom_prov->nxdom_prov_name);

	nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_DETACHING;

	/*
	 * Release reference held for detaching list; if this is the last
	 * reference, the domain provider's nxdom_prov_fini() callback will
	 * be called (if applicable) within the detacher thread's context.
	 * Otherwise, this will occur when the last nexus provider for that
	 * domain provider has been released.
	 */
	(void) nxdom_prov_release_locked(nxdom_prov);
}
475
476 struct nxdom *
nxdom_find(nexus_type_t type)477 nxdom_find(nexus_type_t type)
478 {
479 struct nxdom *nxdom;
480
481 SK_LOCK_ASSERT_HELD();
482 ASSERT(type < NEXUS_TYPE_MAX);
483
484 STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) {
485 if (nxdom->nxdom_type == type) {
486 break;
487 }
488 }
489
490 return nxdom;
491 }
492
493 struct kern_nexus_domain_provider *
nxdom_prov_find(const struct nxdom * nxdom,const char * name)494 nxdom_prov_find(const struct nxdom *nxdom, const char *name)
495 {
496 struct kern_nexus_domain_provider *nxdom_prov = NULL;
497
498 SK_LOCK_ASSERT_HELD();
499
500 if (name != NULL) {
501 STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head,
502 nxdom_prov_link) {
503 if (strlcmp(nxdom_prov->nxdom_prov_name, name,
504 sizeof(nxdom_prov->nxdom_prov_name)) == 0) {
505 break;
506 }
507 }
508 }
509
510 if (nxdom_prov != NULL) {
511 nxdom_prov_retain_locked(nxdom_prov); /* for caller */
512 }
513 return nxdom_prov;
514 }
515
/*
 * Look up a domain provider by UUID across all attached domains.
 * On success returns the provider with a reference held for the
 * caller; returns NULL when no provider matches.  Requires SK_LOCK.
 */
struct kern_nexus_domain_provider *
nxdom_prov_find_uuid(const uuid_t dom_prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	struct nxdom *nxdom;

	SK_LOCK_ASSERT_HELD();
	ASSERT(dom_prov_uuid != NULL && !uuid_is_null(dom_prov_uuid));

	STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) {
		STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head,
		    nxdom_prov_link) {
			ASSERT(!uuid_is_null(nxdom_prov->nxdom_prov_uuid));
			if (uuid_compare(nxdom_prov->nxdom_prov_uuid,
			    dom_prov_uuid) == 0) {
				break;
			}
		}
		/*
		 * Non-NULL here means the inner loop broke on a match
		 * (an exhausted STAILQ_FOREACH leaves the cursor NULL).
		 */
		if (nxdom_prov != NULL) {
			nxdom_prov_retain_locked(nxdom_prov); /* for caller */
			break;
		}
	}

	return nxdom_prov;
}
542
/*
 * Register an external (kext-supplied) domain provider of the given
 * type.  Only kernel pipe and netif domains are currently allowed.
 * The new provider inherits the callback vector of the domain's
 * default provider, with init/fini routed through the external shims.
 * On success the provider's UUID is copied to *dom_prov_uuid.
 *
 * Returns EINVAL on bad arguments or disallowed domain type, ENXIO
 * when the domain or its default provider does not exist, or the
 * error from nxdom_prov_add() (e.g. EEXIST on name collision).
 */
errno_t
kern_nexus_register_domain_provider(const nexus_type_t type,
    const nexus_domain_provider_name_t name,
    const struct kern_nexus_domain_provider_init *init,
    const uint32_t init_len, uuid_t *dom_prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	struct nxdom *nxdom;
	errno_t err = 0;

	/* external init struct must overlay nxdom_prov_ext exactly */
	_CASSERT(sizeof(*init) == sizeof(nxdom_prov->nxdom_prov_ext));

	if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) {
		return EINVAL;
	}

	uuid_clear(*dom_prov_uuid);

	if (name == NULL || init == NULL || init_len < sizeof(*init) ||
	    init->nxdpi_version != KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION) {
		return EINVAL;
	}

	/*
	 * init, fini are required.
	 */
	if (init->nxdpi_init == NULL || init->nxdpi_fini == NULL) {
		return EINVAL;
	}

	SK_LOCK();
	if (nxdom_prov_default[type] == NULL) {
		err = ENXIO;
		goto done;
	}

	nxdom = nxdom_find(type);
	if (nxdom == NULL) {
		err = ENXIO;
		goto done;
	}

	/*
	 * Allow only kernel pipe and netif external domain providers for
	 * now, until we understand the implications and requirements for
	 * supporting other domain types.  For all other types, using
	 * the built-in domain providers and registering nexus should
	 * suffice.
	 */
	if (nxdom->nxdom_type != NEXUS_TYPE_KERNEL_PIPE &&
	    nxdom->nxdom_type != NEXUS_TYPE_NET_IF) {
		err = EINVAL;
		goto done;
	}

	nxdom_prov = nxdom_prov_alloc(Z_WAITOK);

	/*
	 * Point all callback routines to the default provider for this
	 * domain; for nxdom_prov_{init,fini}, refer to externally-provided
	 * callback routines, if applicable.
	 */
	bcopy(init, &nxdom_prov->nxdom_prov_ext, sizeof(*init));
	bcopy(&nxdom_prov_default[type]->nxdom_prov_cb,
	    &nxdom_prov->nxdom_prov_cb, sizeof(struct nxdom_prov_cb));
	nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT;
	nxdom_prov->nxdom_prov_init = nxdom_prov_ext_init;
	nxdom_prov->nxdom_prov_fini = nxdom_prov_ext_fini;
	(void) snprintf(nxdom_prov->nxdom_prov_name,
	    sizeof(nxdom_prov->nxdom_prov_name), "%s", name);

	ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT));
	err = nxdom_prov_add(nxdom, nxdom_prov);
	if (err != 0) {
		nxdom_prov_free(nxdom_prov);
		nxdom_prov = NULL;
	}

done:
	if (nxdom_prov != NULL) {
		ASSERT(err == 0 && !uuid_is_null(nxdom_prov->nxdom_prov_uuid));
		uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
	}
	SK_UNLOCK();

	return err;
}
630
/*
 * Deregister an external domain provider by UUID.  Built-in providers
 * cannot be deregistered through this interface.  Actual teardown is
 * deferred to the detacher thread via nxdom_prov_del().
 *
 * Returns EINVAL on bad/NULL UUID or built-in provider, ENXIO when
 * no provider matches the UUID.
 */
errno_t
kern_nexus_deregister_domain_provider(const uuid_t dom_prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	errno_t err = 0;

	if (dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
		return EINVAL;
	}

	SK_LOCK();
	nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid);
	if (nxdom_prov == NULL) {
		err = ENXIO;
		goto done;
	}

	/* don't allow external request for built-in domain providers */
	if (!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT)) {
		err = EINVAL;
		goto done;
	}

	/* schedule this to be deleted */
	nxdom_prov_del(nxdom_prov);
done:
	/* release reference from nxdom_prov_find_uuid */
	if (nxdom_prov != NULL) {
		(void) nxdom_prov_release_locked(nxdom_prov);
	}
	SK_UNLOCK();

	return err;
}
665
666 errno_t
kern_nexus_get_default_domain_provider(const nexus_type_t type,uuid_t * dom_prov_uuid)667 kern_nexus_get_default_domain_provider(const nexus_type_t type,
668 uuid_t *dom_prov_uuid)
669 {
670 struct kern_nexus_domain_provider *nxdom_prov;
671
672 if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) {
673 return EINVAL;
674 }
675
676 uuid_clear(*dom_prov_uuid);
677
678 /* no lock is needed; array is immutable */
679 if ((nxdom_prov = nxdom_prov_default[type]) == NULL) {
680 return ENXIO;
681 }
682
683 uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
684
685 return 0;
686 }
687
688 static int
nxdom_prov_ext_init(struct kern_nexus_domain_provider * nxdom_prov)689 nxdom_prov_ext_init(struct kern_nexus_domain_provider *nxdom_prov)
690 {
691 int err = 0;
692
693 SK_D("initializing %s", nxdom_prov->nxdom_prov_name);
694
695 ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_init != NULL);
696 if ((err = nxdom_prov->nxdom_prov_ext.nxdpi_init(nxdom_prov)) == 0) {
697 nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT_INITED;
698 }
699
700 return err;
701 }
702
703 static void
nxdom_prov_ext_fini(struct kern_nexus_domain_provider * nxdom_prov)704 nxdom_prov_ext_fini(struct kern_nexus_domain_provider *nxdom_prov)
705 {
706 SK_D("destroying %s", nxdom_prov->nxdom_prov_name);
707
708 if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT_INITED) {
709 ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_fini != NULL);
710 nxdom_prov->nxdom_prov_ext.nxdpi_fini(nxdom_prov);
711 nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_EXT_INITED;
712 }
713 }
714
/* allocate a zero-filled nexus_attr from its zone */
static struct nexus_attr *
nxa_alloc(zalloc_flags_t how)
{
	return zalloc_flags(nxa_zone, how | Z_ZERO);
}
720
/* return a nexus_attr to its zone */
static void
nxa_free(struct nexus_attr *nxa)
{
	SK_DF(SK_VERB_MEM, "nxa 0x%llx FREE", SK_KVA(nxa));
	zfree(nxa_zone, nxa);
}
727
728 errno_t
kern_nexus_attr_create(nexus_attr_t * nxa)729 kern_nexus_attr_create(nexus_attr_t *nxa)
730 {
731 errno_t err = 0;
732
733 if (nxa == NULL) {
734 err = EINVAL;
735 } else {
736 *nxa = nxa_alloc(Z_WAITOK);
737 }
738 return err;
739 }
740
741 errno_t
kern_nexus_attr_clone(const nexus_attr_t nxa,nexus_attr_t * nnxa)742 kern_nexus_attr_clone(const nexus_attr_t nxa, nexus_attr_t *nnxa)
743 {
744 errno_t err = 0;
745
746 if (nnxa == NULL) {
747 err = EINVAL;
748 } else {
749 err = kern_nexus_attr_create(nnxa);
750 if (err == 0 && nxa != NULL) {
751 ASSERT(*nnxa != NULL);
752 bcopy(nxa, *nnxa, sizeof(**nnxa));
753 }
754 }
755 return err;
756 }
757
/* set one attribute field; thin wrapper over __nexus_attr_set() */
errno_t
kern_nexus_attr_set(const nexus_attr_t nxa,
    const nexus_attr_type_t type, const uint64_t value)
{
	return __nexus_attr_set(nxa, type, value);
}
764
/* read one attribute field; thin wrapper over __nexus_attr_get() */
errno_t
kern_nexus_attr_get(nexus_attr_t nxa, const nexus_attr_type_t type,
    uint64_t *value)
{
	return __nexus_attr_get(nxa, type, value);
}
771
/* release an attribute object created by kern_nexus_attr_create() */
void
kern_nexus_attr_destroy(nexus_attr_t nxa)
{
	nxa_free(nxa);
}
777
/* allocate a zero-filled nexus_controller from its zone */
static struct nexus_controller *
ncd_alloc(zalloc_flags_t how)
{
	return zalloc_flags(ncd_zone, how | Z_ZERO);
}
783
/* return a nexus_controller to its zone */
static void
ncd_free(struct nexus_controller *ncd)
{
	SK_DF(SK_VERB_MEM, "ncd 0x%llx FREE", SK_KVA(ncd));
	zfree(ncd_zone, ncd);
}
790
/*
 * Return the controller shared by in-kernel subsystems (a singleton);
 * callers must not destroy it.
 */
nexus_controller_t
kern_nexus_shared_controller(void)
{
	return &kernnxctl;
}
796
/*
 * Create a private nexus controller in *ncd, backed by a freshly
 * created nxctl object owned by the kernel proc.  Returns EINVAL
 * when ncd is NULL, or the error from nxctl_create().  On failure
 * all partially-created state is torn down.
 */
errno_t
kern_nexus_controller_create(nexus_controller_t *ncd)
{
	struct nxctl *nxctl = NULL;
	uuid_t nxctl_uuid;
	errno_t err = 0;

	uuid_generate_random(nxctl_uuid);

	if (ncd == NULL) {
		err = EINVAL;
		goto done;
	} else {
		*ncd = NULL;
	}

	nxctl = nxctl_create(kernproc, NULL, nxctl_uuid, &err);
	if (nxctl == NULL) {
		ASSERT(err != 0);
		goto done;
	}

	/* Z_WAITOK allocation; cannot fail */
	*ncd = ncd_alloc(Z_WAITOK);
	(*ncd)->ncd_nxctl = nxctl;      /* ref from nxctl_create */

done:
	if (err != 0) {
		if (nxctl != NULL) {
			nxctl_dtor(nxctl);
			nxctl = NULL;
		}
		if (ncd != NULL && *ncd != NULL) {
			ncd_free(*ncd);
			*ncd = NULL;
		}
	}

	return err;
}
836
/*
 * True when exactly one callback of an optional pair is supplied;
 * pairs must be symmetrical (both set or both NULL).
 */
#define NXPI_INVALID_CB_PAIRS(cb1, cb2) \
	(!(init->nxpi_##cb1 == NULL && init->nxpi_##cb2 == NULL) && \
	((init->nxpi_##cb1 == NULL) ^ (init->nxpi_##cb2 == NULL)))
840
/*
 * Validate the caller-supplied provider init structure against its
 * declared version and the target domain type.  A NULL init is
 * accepted (no callbacks).  Returns 0 when valid, EINVAL otherwise.
 */
static errno_t
nexus_controller_register_provider_validate_init_params(
	const struct kern_nexus_provider_init *init, uint32_t init_len,
	nexus_type_t nxdom_type)
{
	errno_t err = 0;
	struct kern_nexus_netif_provider_init *netif_init;

	/* nxpi_version must be first so it can be read for any version */
	_CASSERT(__builtin_offsetof(struct kern_nexus_provider_init,
	    nxpi_version) == 0);
	_CASSERT(sizeof(init->nxpi_version) == sizeof(uint32_t));

	if (init == NULL) {
		return 0;
	}

	/* need at least the version field to dispatch on */
	if (init_len < sizeof(uint32_t)) {
		return EINVAL;
	}

	switch (init->nxpi_version) {
	case KERN_NEXUS_PROVIDER_VERSION_1:
		if (init_len != sizeof(struct kern_nexus_provider_init)) {
			err = EINVAL;
			break;
		}
		/*
		 * sync_{tx,rx} callbacks are required; the rest of the
		 * callback pairs are optional, but must be symmetrical.
		 */
		if (init->nxpi_sync_tx == NULL || init->nxpi_sync_rx == NULL ||
		    init->nxpi_pre_connect == NULL ||
		    init->nxpi_connected == NULL ||
		    init->nxpi_pre_disconnect == NULL ||
		    init->nxpi_disconnected == NULL ||
		    NXPI_INVALID_CB_PAIRS(ring_init, ring_fini) ||
		    NXPI_INVALID_CB_PAIRS(slot_init, slot_fini)) {
			err = EINVAL;
			break;
		}
		/*
		 * Tx doorbell interface is only supported for netif and
		 * Tx doorbell is mandatory for netif
		 */
		if (((init->nxpi_tx_doorbell != NULL) &&
		    (nxdom_type != NEXUS_TYPE_NET_IF)) ||
		    ((nxdom_type == NEXUS_TYPE_NET_IF) &&
		    (init->nxpi_tx_doorbell == NULL))) {
			err = EINVAL;
			break;
		}
		/*
		 * Capabilities configuration interface is only supported for
		 * netif.
		 */
		if ((init->nxpi_config_capab != NULL) &&
		    (nxdom_type != NEXUS_TYPE_NET_IF)) {
			err = EINVAL;
			break;
		}
		break;

	case KERN_NEXUS_PROVIDER_VERSION_NETIF:
		/* logical-link netif variant; all callbacks mandatory */
		if (init_len != sizeof(struct kern_nexus_netif_provider_init)) {
			err = EINVAL;
			break;
		}
		if (nxdom_type != NEXUS_TYPE_NET_IF) {
			err = EINVAL;
			break;
		}
		netif_init =
		    __DECONST(struct kern_nexus_netif_provider_init *, init);
		if (netif_init->nxnpi_pre_connect == NULL ||
		    netif_init->nxnpi_connected == NULL ||
		    netif_init->nxnpi_pre_disconnect == NULL ||
		    netif_init->nxnpi_disconnected == NULL ||
		    netif_init->nxnpi_qset_init == NULL ||
		    netif_init->nxnpi_qset_fini == NULL ||
		    netif_init->nxnpi_queue_init == NULL ||
		    netif_init->nxnpi_queue_fini == NULL ||
		    netif_init->nxnpi_tx_qset_notify == NULL ||
		    netif_init->nxnpi_config_capab == NULL) {
			err = EINVAL;
			break;
		}
		break;

	default:
		err = EINVAL;
		break;
	}
	return err;
}
935
/*
 * Register a nexus provider on a controller under the given domain
 * provider (by UUID), validating init parameters against the domain
 * type.  On success the provider's UUID is copied to *prov_uuid.
 *
 * Locking: NXCTL_LOCK is held across the whole operation; SK_LOCK is
 * taken and dropped inside it.  Returns EINVAL on bad arguments or
 * invalid init, ENXIO for an unknown domain provider, or the error
 * from nxprov_create_kern().
 */
errno_t
kern_nexus_controller_register_provider(const nexus_controller_t ncd,
    const uuid_t dom_prov_uuid, const nexus_name_t name,
    const struct kern_nexus_provider_init *init, uint32_t init_len,
    const nexus_attr_t nxa, uuid_t *prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	struct kern_nexus_provider *nxprov = NULL;
	nexus_type_t nxdom_type;
	struct nxprov_reg reg;
	struct nxctl *nxctl;
	errno_t err = 0;

	if (prov_uuid == NULL) {
		return EINVAL;
	}

	uuid_clear(*prov_uuid);

	if (ncd == NULL ||
	    dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
		return EINVAL;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	SK_LOCK();
	nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid);
	if (nxdom_prov == NULL) {
		SK_UNLOCK();
		err = ENXIO;
		goto done;
	}

	nxdom_type = nxdom_prov->nxdom_prov_dom->nxdom_type;
	ASSERT(nxdom_type < NEXUS_TYPE_MAX);

	err = nexus_controller_register_provider_validate_init_params(init,
	    init_len, nxdom_type);
	if (err != 0) {
		SK_UNLOCK();
		err = EINVAL;
		goto done;
	}

	if ((err = __nexus_provider_reg_prepare(&reg,
	    __unsafe_null_terminated_from_indexable(name), nxdom_type, nxa)) != 0) {
		SK_UNLOCK();
		goto done;
	}

	/* netif logical-link providers are flagged in the registration */
	if (init && init->nxpi_version == KERN_NEXUS_PROVIDER_VERSION_NETIF) {
		reg.nxpreg_params.nxp_flags |= NXPF_NETIF_LLINK;
	}

	/* callee will hold reference on nxdom_prov upon success */
	if ((nxprov = nxprov_create_kern(nxctl, nxdom_prov, &reg,
	    init, &err)) == NULL) {
		SK_UNLOCK();
		ASSERT(err != 0);
		goto done;
	}
	SK_UNLOCK();

	uuid_copy(*prov_uuid, nxprov->nxprov_uuid);

done:
	SK_LOCK_ASSERT_NOTHELD();
	NXCTL_UNLOCK(nxctl);

	if (err != 0 && nxprov != NULL) {
		err = nxprov_close(nxprov, FALSE);
	}

	/* release extra ref from nxprov_create_kern */
	if (nxprov != NULL) {
		nxprov_release(nxprov);
	}
	/* release extra ref from nxdom_prov_find_uuid */
	if (nxdom_prov != NULL) {
		(void) nxdom_prov_release(nxdom_prov);
	}

	return err;
}

/* scope NXPI_INVALID_CB_PAIRS to the validation routine above */
#undef NXPI_INVALID_CB_PAIRS
1023
1024 errno_t
kern_nexus_controller_deregister_provider(const nexus_controller_t ncd,const uuid_t prov_uuid)1025 kern_nexus_controller_deregister_provider(const nexus_controller_t ncd,
1026 const uuid_t prov_uuid)
1027 {
1028 errno_t err;
1029
1030 if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid)) {
1031 err = EINVAL;
1032 } else {
1033 struct nxctl *nxctl = ncd->ncd_nxctl;
1034 NXCTL_LOCK(nxctl);
1035 err = nxprov_destroy(nxctl, prov_uuid);
1036 NXCTL_UNLOCK(nxctl);
1037 }
1038 return err;
1039 }
1040
/*
 * Instantiate a nexus from a registered provider.  The caller may
 * supply a context pointer (released via nx_ctx_release) and packet
 * buffer pools; if an RX pool is given it must be the same as the TX
 * pool.  On success the new nexus UUID is copied to *nx_uuid.
 * Returns EINVAL on bad arguments, or the error from nx_create().
 */
errno_t
kern_nexus_controller_alloc_provider_instance(const nexus_controller_t ncd,
    const uuid_t prov_uuid, const void *nx_ctx,
    nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid,
    const struct kern_nexus_init *init)
{
	struct kern_nexus *nx = NULL;
	struct nxctl *nxctl;
	errno_t err = 0;

	/* split TX/RX pools are not supported: RX must be NULL or == TX */
	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
	    nx_uuid == NULL || init == NULL ||
	    init->nxi_version != KERN_NEXUS_CURRENT_VERSION ||
	    (init->nxi_rx_pbufpool != NULL &&
	    init->nxi_rx_pbufpool != init->nxi_tx_pbufpool)) {
		err = EINVAL;
		goto done;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_UNDEFINED, nx_ctx,
	    nx_ctx_release, init->nxi_tx_pbufpool, init->nxi_rx_pbufpool, &err);
	NXCTL_UNLOCK(nxctl);
	if (nx == NULL) {
		ASSERT(err != 0);
		goto done;
	}
	ASSERT(err == 0);
	uuid_copy(*nx_uuid, nx->nx_uuid);

done:
	/* release extra ref from nx_create */
	if (nx != NULL) {
		(void) nx_release(nx);
	}

	return err;
}
1080
/*
 * Create a netif nexus instance together with its ifnet: allocate an
 * embryonic ifnet, create the NEXUS_TYPE_NET_IF instance, run the
 * optional logical-link and prepare steps, attach the ifnet to the
 * nexus, and finalize via ifnet_attach().  On success, *nx_uuid
 * receives the instance UUID and *pifp the ifnet; the caller keeps the
 * ifnet reference taken by ifnet_allocate_extended() until its detach
 * callback runs.  On failure, all intermediate state is unwound
 * (nexus closed and released, ifnet reference dropped).
 */
errno_t
kern_nexus_controller_alloc_net_provider_instance(
    const nexus_controller_t ncd, const uuid_t prov_uuid, const void *nx_ctx,
    nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid,
    const struct kern_nexus_net_init *init, struct ifnet **pifp)
{
	struct kern_nexus *nx = NULL;
	struct ifnet *__single ifp = NULL;
	struct nxctl *nxctl;
	boolean_t nxctl_locked = FALSE;
	errno_t err = 0;

	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
	    nx_uuid == NULL || init == NULL ||
	    init->nxneti_version != KERN_NEXUS_NET_CURRENT_VERSION ||
	    init->nxneti_eparams == NULL || pifp == NULL) {
		err = EINVAL;
		goto done;
	}

	/*
	 * Skywalk native interface doesn't support legacy model.
	 */
	if ((init->nxneti_eparams->start != NULL) ||
	    (init->nxneti_eparams->flags & IFNET_INIT_LEGACY) ||
	    (init->nxneti_eparams->flags & IFNET_INIT_INPUT_POLL)) {
		err = EINVAL;
		goto done;
	}

	/* create an embryonic ifnet */
	err = ifnet_allocate_extended(init->nxneti_eparams, &ifp);
	if (err != 0) {
		goto done;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	nxctl_locked = TRUE;

	nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_NET_IF, nx_ctx,
	    nx_ctx_release, init->nxneti_tx_pbufpool, init->nxneti_rx_pbufpool,
	    &err);
	if (nx == NULL) {
		ASSERT(err != 0);
		goto done;
	}

	/* logical-link providers must supply a link configuration */
	if (NX_LLINK_PROV(nx)) {
		if (init->nxneti_llink == NULL) {
			SK_ERR("logical link configuration required");
			err = EINVAL;
			goto done;
		}
		err = nx_netif_default_llink_config(NX_NETIF_PRIVATE(nx),
		    init->nxneti_llink);
		if (err != 0) {
			goto done;
		}
	}

	/* prepare this ifnet instance if needed */
	if (init->nxneti_prepare != NULL) {
		err = init->nxneti_prepare(nx, ifp);
		if (err != 0) {
			goto done;
		}
	}

	/* attach embryonic ifnet to nexus */
	/*
	 * XXX -fbounds-safety: Update this once __counted_by_or_null is
	 * available (rdar://75598414)
	 */
	err = _kern_nexus_ifattach(nxctl, nx->nx_uuid, ifp,
	    __unsafe_forge_bidi_indexable(unsigned char *, NULL, sizeof(uuid_t)),
	    FALSE, NULL);

	if (err != 0) {
		goto done;
	}

	/* and finalize the ifnet attach (done outside the controller lock) */
	ASSERT(nxctl_locked);
	NXCTL_UNLOCK(nxctl);
	nxctl_locked = FALSE;

	err = ifnet_attach(ifp, init->nxneti_lladdr);
	if (err != 0) {
		goto done;
	}

	ASSERT(err == 0);
	/*
	 * Return ifnet reference held by ifnet_allocate_extended();
	 * caller is expected to retain this reference until its ifnet
	 * detach callback is called.
	 */
	*pifp = ifp;
	uuid_copy(*nx_uuid, nx->nx_uuid);

done:
	if (nxctl_locked) {
		NXCTL_UNLOCK(nxctl);
	}

	/* release extra ref from nx_create */
	if (nx != NULL) {
		SK_LOCK();
		/* on failure, also close the half-built nexus instance */
		if (err != 0) {
			(void) nx_close(nx, TRUE);
		}
		(void) nx_release_locked(nx);
		SK_UNLOCK();
	}
	/* on failure, drop the ref from ifnet_allocate_extended */
	if (err != 0 && ifp != NULL) {
		ifnet_release(ifp);
	}

	return err;
}
1202
1203 errno_t
kern_nexus_controller_free_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid)1204 kern_nexus_controller_free_provider_instance(const nexus_controller_t ncd,
1205 const uuid_t nx_uuid)
1206 {
1207 errno_t err;
1208
1209 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1210 err = EINVAL;
1211 } else {
1212 struct nxctl *nxctl = ncd->ncd_nxctl;
1213 NXCTL_LOCK(nxctl);
1214 err = nx_destroy(nxctl, nx_uuid);
1215 NXCTL_UNLOCK(nxctl);
1216 }
1217 return err;
1218 }
1219
1220 errno_t
kern_nexus_controller_bind_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid,nexus_port_t * port,const pid_t pid,const uuid_t exec_uuid,const void * key,const uint32_t key_len,const uint32_t bind_flags)1221 kern_nexus_controller_bind_provider_instance(const nexus_controller_t ncd,
1222 const uuid_t nx_uuid, nexus_port_t *port, const pid_t pid,
1223 const uuid_t exec_uuid, const void *key, const uint32_t key_len,
1224 const uint32_t bind_flags)
1225 {
1226 struct nx_bind_req nbr;
1227 struct sockopt sopt;
1228 struct nxctl *nxctl;
1229 int err = 0;
1230
1231 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1232 port == NULL) {
1233 return EINVAL;
1234 }
1235
1236 __nexus_bind_req_prepare(&nbr, nx_uuid, *port, pid, exec_uuid,
1237 key, key_len, bind_flags);
1238
1239 bzero(&sopt, sizeof(sopt));
1240 sopt.sopt_dir = SOPT_SET;
1241 sopt.sopt_name = NXOPT_NEXUS_BIND;
1242 sopt.sopt_val = (user_addr_t)&nbr;
1243 sopt.sopt_valsize = sizeof(nbr);
1244 sopt.sopt_p = kernproc;
1245
1246 nxctl = ncd->ncd_nxctl;
1247 NXCTL_LOCK(nxctl);
1248 err = nxctl_set_opt(nxctl, &sopt);
1249 NXCTL_UNLOCK(nxctl);
1250
1251 if (err == 0) {
1252 *port = nbr.nb_port;
1253 }
1254
1255 return err;
1256 }
1257
1258 errno_t
kern_nexus_controller_unbind_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid,const nexus_port_t port)1259 kern_nexus_controller_unbind_provider_instance(const nexus_controller_t ncd,
1260 const uuid_t nx_uuid, const nexus_port_t port)
1261 {
1262 struct nx_unbind_req nbu;
1263 struct sockopt sopt;
1264 struct nxctl *nxctl;
1265 int err = 0;
1266
1267 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1268 return EINVAL;
1269 }
1270
1271 __nexus_unbind_req_prepare(&nbu, nx_uuid, port);
1272
1273 bzero(&sopt, sizeof(sopt));
1274 sopt.sopt_dir = SOPT_SET;
1275 sopt.sopt_name = NXOPT_NEXUS_UNBIND;
1276 sopt.sopt_val = (user_addr_t)&nbu;
1277 sopt.sopt_valsize = sizeof(nbu);
1278 sopt.sopt_p = kernproc;
1279
1280 nxctl = ncd->ncd_nxctl;
1281 NXCTL_LOCK(nxctl);
1282 err = nxctl_set_opt(nxctl, &sopt);
1283 NXCTL_UNLOCK(nxctl);
1284
1285 return err;
1286 }
1287
1288 errno_t
kern_nexus_controller_read_provider_attr(const nexus_controller_t ncd,const uuid_t prov_uuid,nexus_attr_t nxa)1289 kern_nexus_controller_read_provider_attr(const nexus_controller_t ncd,
1290 const uuid_t prov_uuid, nexus_attr_t nxa)
1291 {
1292 struct nxprov_reg_ent nre;
1293 struct nxprov_params *p = &nre.npre_prov_params;
1294 struct sockopt sopt;
1295 struct nxctl *nxctl;
1296 int err = 0;
1297
1298 if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
1299 nxa == NULL) {
1300 return EINVAL;
1301 }
1302
1303 bzero(&nre, sizeof(nre));
1304 bcopy(prov_uuid, nre.npre_prov_uuid, sizeof(uuid_t));
1305
1306 bzero(&sopt, sizeof(sopt));
1307 sopt.sopt_dir = SOPT_GET;
1308 sopt.sopt_name = NXOPT_NEXUS_PROV_ENTRY;
1309 sopt.sopt_val = (user_addr_t)&nre;
1310 sopt.sopt_valsize = sizeof(nre);
1311 sopt.sopt_p = kernproc;
1312
1313 nxctl = ncd->ncd_nxctl;
1314 NXCTL_LOCK(nxctl);
1315 err = nxctl_get_opt(nxctl, &sopt);
1316 NXCTL_UNLOCK(nxctl);
1317
1318 if (err == 0) {
1319 __nexus_attr_from_params(nxa, p);
1320 }
1321
1322 return err;
1323 }
1324
/*
 * Destroy a nexus controller handle: tear down the underlying nxctl,
 * then free the controller descriptor itself.  Safe to call with NULL.
 */
void
kern_nexus_controller_destroy(nexus_controller_t ncd)
{
	struct nxctl *nxctl;

	if (ncd == NULL) {
		return;
	}

	nxctl = ncd->ncd_nxctl;
	ASSERT(nxctl != NULL);
	/* clear the back-pointer before tearing down the nxctl */
	ncd->ncd_nxctl = NULL;
	nxctl_dtor(nxctl);

	ncd_free(ncd);
}
1341
/*
 * Return the client-supplied context pointer associated with the
 * nexus (the nx_ctx passed at instance creation time).
 */
void *
kern_nexus_get_context(const kern_nexus_t nx)
{
	return nx->nx_ctx;
}
1347
/*
 * Quiesce the nexus: run nx_stop() while holding the global
 * Skywalk lock.
 */
void
kern_nexus_stop(const kern_nexus_t nx)
{
	SK_LOCK();
	nx_stop(nx);
	SK_UNLOCK();
}
1355
1356 errno_t
kern_nexus_get_pbufpool(const kern_nexus_t nx,kern_pbufpool_t * ptx_pp,kern_pbufpool_t * prx_pp)1357 kern_nexus_get_pbufpool(const kern_nexus_t nx, kern_pbufpool_t *ptx_pp,
1358 kern_pbufpool_t *prx_pp)
1359 {
1360 kern_pbufpool_t __single tpp = NULL, rpp = NULL;
1361 int err = 0;
1362
1363 if (ptx_pp == NULL && prx_pp == NULL) {
1364 return EINVAL;
1365 }
1366
1367 if (NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info == NULL) {
1368 err = ENOTSUP;
1369 } else {
1370 err = NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info(nx, &tpp, &rpp);
1371 }
1372
1373 if (ptx_pp != NULL) {
1374 *ptx_pp = tpp;
1375 }
1376 if (prx_pp != NULL) {
1377 *prx_pp = rpp;
1378 }
1379
1380 return err;
1381 }
1382
1383 static int
_kern_nexus_ifattach(struct nxctl * nxctl,const uuid_t nx_uuid,struct ifnet * ifp,const uuid_t nx_uuid_attachee,boolean_t host,uuid_t * nx_if_uuid)1384 _kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid,
1385 struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
1386 uuid_t *nx_if_uuid)
1387 {
1388 struct nx_cfg_req ncr;
1389 struct nx_spec_req nsr;
1390 struct sockopt sopt;
1391 int err = 0;
1392
1393 NXCTL_LOCK_ASSERT_HELD(nxctl);
1394
1395 if (nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1396 return EINVAL;
1397 }
1398
1399 bzero(&nsr, sizeof(nsr));
1400 if (ifp != NULL) {
1401 if (nx_uuid_attachee != NULL) {
1402 return EINVAL;
1403 }
1404
1405 nsr.nsr_flags = NXSPECREQ_IFP;
1406 nsr.nsr_ifp = ifp;
1407 } else {
1408 if (nx_uuid_attachee == NULL) {
1409 return EINVAL;
1410 }
1411
1412 nsr.nsr_flags = NXSPECREQ_UUID;
1413 if (host) {
1414 nsr.nsr_flags |= NXSPECREQ_HOST;
1415 }
1416
1417 uuid_copy(nsr.nsr_uuid, nx_uuid_attachee);
1418 }
1419 __nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_ATTACH,
1420 &nsr, sizeof(nsr));
1421
1422 bzero(&sopt, sizeof(sopt));
1423 sopt.sopt_dir = SOPT_SET;
1424 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1425 sopt.sopt_val = (user_addr_t)&ncr;
1426 sopt.sopt_valsize = sizeof(ncr);
1427 sopt.sopt_p = kernproc;
1428
1429 err = nxctl_set_opt(nxctl, &sopt);
1430 if (err == 0 && nx_if_uuid != NULL) {
1431 uuid_copy(*nx_if_uuid, nsr.nsr_if_uuid);
1432 }
1433
1434 return err;
1435 }
1436
/*
 * Public wrapper for _kern_nexus_ifattach(): attach an ifnet or
 * another nexus to the nexus identified by nx_uuid, taking the
 * controller lock around the operation.  See _kern_nexus_ifattach()
 * for the argument contract.
 */
int
kern_nexus_ifattach(nexus_controller_t ncd, const uuid_t nx_uuid,
    struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
    uuid_t *nx_if_uuid)
{
	struct nxctl *nxctl;
	int err = 0;

	if (ncd == NULL) {
		return EINVAL;
	}

	nxctl = ncd->ncd_nxctl;
	ASSERT(nxctl != NULL);
	NXCTL_LOCK(nxctl);
	err = _kern_nexus_ifattach(nxctl, nx_uuid, ifp, nx_uuid_attachee,
	    host, nx_if_uuid);
	NXCTL_UNLOCK(nxctl);

	return err;
}
1458
1459 int
kern_nexus_ifdetach(const nexus_controller_t ncd,const uuid_t nx_uuid,const uuid_t nx_if_uuid)1460 kern_nexus_ifdetach(const nexus_controller_t ncd,
1461 const uuid_t nx_uuid, const uuid_t nx_if_uuid)
1462 {
1463 struct nx_cfg_req ncr;
1464 struct nx_spec_req nsr;
1465 struct sockopt sopt;
1466 struct nxctl *nxctl;
1467 int err = 0;
1468
1469 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1470 nx_if_uuid == NULL || uuid_is_null(nx_if_uuid)) {
1471 return EINVAL;
1472 }
1473
1474 bzero(&nsr, sizeof(nsr));
1475 uuid_copy(nsr.nsr_if_uuid, nx_if_uuid);
1476
1477 __nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_DETACH,
1478 &nsr, sizeof(nsr));
1479
1480 bzero(&sopt, sizeof(sopt));
1481 sopt.sopt_dir = SOPT_SET;
1482 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1483 sopt.sopt_val = (user_addr_t)&ncr;
1484 sopt.sopt_valsize = sizeof(ncr);
1485 sopt.sopt_p = kernproc;
1486
1487 nxctl = ncd->ncd_nxctl;
1488 NXCTL_LOCK(nxctl);
1489 err = nxctl_set_opt(nxctl, &sopt);
1490 NXCTL_UNLOCK(nxctl);
1491
1492 return err;
1493 }
1494
1495 int
kern_nexus_get_netif_instance(struct ifnet * ifp,uuid_t nx_uuid)1496 kern_nexus_get_netif_instance(struct ifnet *ifp, uuid_t nx_uuid)
1497 {
1498 struct nexus_netif_adapter *if_na;
1499 int err = 0;
1500
1501 SK_LOCK();
1502 if_na = ifp->if_na;
1503 if (if_na != NULL) {
1504 uuid_copy(nx_uuid, if_na->nifna_up.na_nx->nx_uuid);
1505 } else {
1506 err = ENXIO;
1507 }
1508 SK_UNLOCK();
1509 if (err != 0) {
1510 uuid_clear(nx_uuid);
1511 }
1512
1513 return err;
1514 }
1515
1516 int
kern_nexus_get_flowswitch_instance(struct ifnet * ifp,uuid_t nx_uuid)1517 kern_nexus_get_flowswitch_instance(struct ifnet *ifp, uuid_t nx_uuid)
1518 {
1519 struct nexus_netif_adapter *if_na;
1520 struct nx_flowswitch *fsw = NULL;
1521 int err = 0;
1522
1523 SK_LOCK();
1524 if_na = ifp->if_na;
1525 if (if_na != NULL) {
1526 fsw = ifp->if_na->nifna_netif->nif_fsw;
1527 }
1528 if (fsw != NULL) {
1529 uuid_copy(nx_uuid, fsw->fsw_nx->nx_uuid);
1530 } else {
1531 err = ENXIO;
1532 }
1533 SK_UNLOCK();
1534 if (err != 0) {
1535 uuid_clear(nx_uuid);
1536 }
1537
1538 return err;
1539 }
1540
/*
 * kern_nexus_walktree() callback: register the netagent associated
 * with this nexus.  arg0 is unused; the nx_fsw_netagent_add() result
 * is intentionally ignored in the walk context.
 */
static void
kern_nexus_netagent_add(struct kern_nexus *nx, void *arg0)
{
#pragma unused(arg0)
	nx_fsw_netagent_add(nx);
}
1547
/*
 * kern_nexus_walktree() callback: deregister the netagent associated
 * with this nexus.  arg0 is unused; the result is ignored in the
 * walk context.
 */
static void
kern_nexus_netagent_remove(struct kern_nexus *nx, void *arg0)
{
#pragma unused(arg0)
	nx_fsw_netagent_remove(nx);
}
1554
/*
 * kern_nexus_walktree() callback: refresh the netagent registration
 * for this nexus.  arg0 is unused.
 */
static void
kern_nexus_netagent_update(struct kern_nexus *nx, void *arg0)
{
#pragma unused(arg0)
	nx_fsw_netagent_update(nx);
}
1561
/*
 * Walk every nexus in the tree and register its netagent (used when
 * netagent support is turned on system-wide).
 */
void
kern_nexus_register_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_add, NULL, FALSE);
}
1567
/*
 * Walk every nexus in the tree and deregister its netagent (used when
 * netagent support is turned off system-wide).
 */
void
kern_nexus_deregister_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_remove, NULL, FALSE);
}
1573
/*
 * Walk every nexus in the tree and refresh its netagent registration.
 */
void
kern_nexus_update_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_update, NULL, FALSE);
}
1579
1580 static int
_interface_add_remove_netagent(struct ifnet * ifp,bool add)1581 _interface_add_remove_netagent(struct ifnet *ifp, bool add)
1582 {
1583 struct nexus_netif_adapter *if_na;
1584 int err = ENXIO;
1585
1586 SK_LOCK();
1587 if_na = ifp->if_na;
1588 if (if_na != NULL) {
1589 struct nx_flowswitch *fsw;
1590
1591 fsw = if_na->nifna_netif->nif_fsw;
1592 if (fsw != NULL) {
1593 if (add) {
1594 err = nx_fsw_netagent_add(fsw->fsw_nx);
1595 } else {
1596 err = nx_fsw_netagent_remove(fsw->fsw_nx);
1597 }
1598 }
1599 }
1600 SK_UNLOCK();
1601 return err;
1602 }
1603
/*
 * Register the flowswitch netagent for ifp's flowswitch, if any.
 */
int
kern_nexus_interface_add_netagent(struct ifnet *ifp)
{
	return _interface_add_remove_netagent(ifp, true);
}
1609
/*
 * Deregister the flowswitch netagent for ifp's flowswitch, if any.
 */
int
kern_nexus_interface_remove_netagent(struct ifnet *ifp)
{
	return _interface_add_remove_netagent(ifp, false);
}
1615
1616 int
kern_nexus_set_netif_input_tbr_rate(struct ifnet * ifp,uint64_t rate)1617 kern_nexus_set_netif_input_tbr_rate(struct ifnet *ifp, uint64_t rate)
1618 {
1619 /* input tbr is only functional with active netif attachment */
1620 if (ifp->if_na == NULL) {
1621 if (rate != 0) {
1622 return EINVAL;
1623 } else {
1624 return 0;
1625 }
1626 }
1627
1628 ifp->if_na->nifna_netif->nif_input_rate = rate;
1629 return 0;
1630 }
1631
1632 int
kern_nexus_set_if_netem_params(const nexus_controller_t ncd,const uuid_t nx_uuid,void * data,size_t data_len)1633 kern_nexus_set_if_netem_params(const nexus_controller_t ncd,
1634 const uuid_t nx_uuid, void *data, size_t data_len)
1635 {
1636 struct nx_cfg_req ncr;
1637 struct sockopt sopt;
1638 struct nxctl *nxctl;
1639 int err = 0;
1640
1641 if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1642 data_len < sizeof(struct if_netem_params)) {
1643 return EINVAL;
1644 }
1645
1646 __nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_NETEM,
1647 data, data_len);
1648 bzero(&sopt, sizeof(sopt));
1649 sopt.sopt_dir = SOPT_SET;
1650 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1651 sopt.sopt_val = (user_addr_t)&ncr;
1652 sopt.sopt_valsize = sizeof(ncr);
1653 sopt.sopt_p = kernproc;
1654
1655 nxctl = ncd->ncd_nxctl;
1656 NXCTL_LOCK(nxctl);
1657 err = nxctl_set_opt(nxctl, &sopt);
1658 NXCTL_UNLOCK(nxctl);
1659
1660 return err;
1661 }
1662
1663 static int
_kern_nexus_flow_config(const nexus_controller_t ncd,const uuid_t nx_uuid,const nxcfg_cmd_t cmd,void * data,size_t data_len)1664 _kern_nexus_flow_config(const nexus_controller_t ncd, const uuid_t nx_uuid,
1665 const nxcfg_cmd_t cmd, void *data, size_t data_len)
1666 {
1667 struct nx_cfg_req ncr;
1668 struct sockopt sopt;
1669 struct nxctl *nxctl;
1670 int err = 0;
1671
1672 if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1673 data_len < sizeof(struct nx_flow_req)) {
1674 return EINVAL;
1675 }
1676
1677 __nexus_config_req_prepare(&ncr, nx_uuid, cmd, data, data_len);
1678
1679 bzero(&sopt, sizeof(sopt));
1680 sopt.sopt_dir = SOPT_SET;
1681 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1682 sopt.sopt_val = (user_addr_t)&ncr;
1683 sopt.sopt_valsize = sizeof(ncr);
1684 sopt.sopt_p = kernproc;
1685
1686 nxctl = ncd->ncd_nxctl;
1687 NXCTL_LOCK(nxctl);
1688 err = nxctl_set_opt(nxctl, &sopt);
1689 NXCTL_UNLOCK(nxctl);
1690
1691 return err;
1692 }
1693
/*
 * Add a flow entry (struct nx_flow_req in `data') to the nexus
 * identified by nx_uuid via NXCFG_CMD_FLOW_ADD.
 */
int
kern_nexus_flow_add(const nexus_controller_t ncd, const uuid_t nx_uuid,
    void *data, size_t data_len)
{
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_ADD, data,
	           data_len);
}
1701
/*
 * Remove a flow entry (struct nx_flow_req in `data') from the nexus
 * identified by nx_uuid via NXCFG_CMD_FLOW_DEL.
 */
int
kern_nexus_flow_del(const nexus_controller_t ncd, const uuid_t nx_uuid,
    void *data, size_t data_len)
{
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_DEL, data,
	           data_len);
}
1709
/*
 * Allocate a zero-filled domain provider from its zone; caller must
 * hold the global Skywalk lock.
 */
static struct kern_nexus_domain_provider *
nxdom_prov_alloc(zalloc_flags_t how)
{
	SK_LOCK_ASSERT_HELD();

	return zalloc_flags(nxdom_prov_zone, how | Z_ZERO);
}
1717
/*
 * Final teardown of a domain provider whose refcount has dropped to
 * zero.  Runs the provider's fini callback if it was initialized,
 * clears its identity, and returns the memory to the zone only for
 * externally-registered (NXDOMPROVF_EXT) providers; non-EXT providers
 * are only reset here — presumably they are not zone-allocated
 * (TODO confirm).
 */
static void
nxdom_prov_free(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK_ASSERT_HELD();

	ASSERT(nxdom_prov->nxdom_prov_refcnt == 0);
	ASSERT(!(nxdom_prov->nxdom_prov_flags &
	    (NXDOMPROVF_ATTACHED | NXDOMPROVF_DETACHING)));

	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED) {
		/*
		 * Tell the domain provider that we're done with this
		 * instance, and it is now free to go away.
		 */
		if (nxdom_prov->nxdom_prov_fini != NULL) {
			nxdom_prov->nxdom_prov_fini(nxdom_prov);
		}
		nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_INITIALIZED;
	}
	/* wipe identity so stale lookups cannot match this provider */
	uuid_clear(nxdom_prov->nxdom_prov_uuid);
	nxdom_prov->nxdom_prov_dom = NULL;

	SK_DF(SK_VERB_MEM, "nxdom_prov 0x%llx %s", SK_KVA(nxdom_prov),
	    ((nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) ?
	    "FREE" : "DESTROY"));
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) {
		zfree(nxdom_prov_zone, nxdom_prov);
	}
}
1747
/*
 * Take an additional reference on the domain provider; caller must
 * hold the global Skywalk lock.  The post-increment assert catches
 * refcount wraparound.
 */
void
nxdom_prov_retain_locked(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK_ASSERT_HELD();

	nxdom_prov->nxdom_prov_refcnt++;
	ASSERT(nxdom_prov->nxdom_prov_refcnt != 0);
}
1756
/*
 * Take an additional reference on the domain provider, acquiring the
 * global Skywalk lock around the operation.
 */
void
nxdom_prov_retain(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK();
	nxdom_prov_retain_locked(nxdom_prov);
	SK_UNLOCK();
}
1764
/*
 * Default parameter-validation callback: adjust the requested
 * parameters against the provider's own domain, using that single
 * domain for the default, minimum and maximum bounds, with no extra
 * adjustment hook.
 */
static int
nxdom_prov_params_default(struct kern_nexus_domain_provider *nxdom_prov,
    const uint32_t req, const struct nxprov_params *nxp0,
    struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS],
    uint32_t pp_region_config_flags)
{
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;

	return nxprov_params_adjust(nxdom_prov, req, nxp0, nxp, srp,
	           nxdom, nxdom, nxdom, pp_region_config_flags, NULL);
}
1776
1777 int
nxdom_prov_validate_params(struct kern_nexus_domain_provider * nxdom_prov,const struct nxprov_reg * reg,struct nxprov_params * nxp,struct skmem_region_params srp[SKMEM_REGIONS],const uint32_t oflags,uint32_t pp_region_config_flags)1778 nxdom_prov_validate_params(struct kern_nexus_domain_provider *nxdom_prov,
1779 const struct nxprov_reg *reg, struct nxprov_params *nxp,
1780 struct skmem_region_params srp[SKMEM_REGIONS], const uint32_t oflags,
1781 uint32_t pp_region_config_flags)
1782 {
1783 const struct nxprov_params *nxp0 = ®->nxpreg_params;
1784 const uint32_t req = reg->nxpreg_requested;
1785 int i, err = 0;
1786
1787 ASSERT(reg->nxpreg_version == NXPROV_REG_CURRENT_VERSION &&
1788 nxp0->nxp_namelen != 0 &&
1789 nxp0->nxp_namelen <= sizeof(nexus_name_t));
1790
1791 /* fill in with default values and let the nexus override them */
1792 bzero(nxp, sizeof(*nxp));
1793 bcopy(&nxp0->nxp_name, &nxp->nxp_name, sizeof(nxp->nxp_name));
1794 nxp->nxp_name[sizeof(nxp->nxp_name) - 1] = '\0';
1795 nxp->nxp_namelen = nxp0->nxp_namelen;
1796 nxp->nxp_type = nxp0->nxp_type;
1797 nxp->nxp_md_type = nxdom_prov->nxdom_prov_dom->nxdom_md_type;
1798 nxp->nxp_md_subtype = nxdom_prov->nxdom_prov_dom->nxdom_md_subtype;
1799 nxp->nxp_flags = (nxp0->nxp_flags & NXPF_MASK);
1800 nxp->nxp_flags |= oflags; /* override */
1801 nxp->nxp_format = nxp0->nxp_format;
1802 nxp->nxp_ifindex = nxp0->nxp_ifindex;
1803 nxp->nxp_reject_on_close = nxp0->nxp_reject_on_close;
1804
1805 /* inherit default region parameters */
1806 for (i = 0; i < SKMEM_REGIONS; i++) {
1807 srp[i] = *skmem_get_default(i);
1808 }
1809
1810 if (nxdom_prov->nxdom_prov_params != NULL) {
1811 err = nxdom_prov->nxdom_prov_params(nxdom_prov, req, nxp0,
1812 nxp, srp, pp_region_config_flags);
1813 } else {
1814 err = nxdom_prov_params_default(nxdom_prov, req, nxp0,
1815 nxp, srp, pp_region_config_flags);
1816 }
1817 return err;
1818 }
1819
1820 boolean_t
nxdom_prov_release_locked(struct kern_nexus_domain_provider * nxdom_prov)1821 nxdom_prov_release_locked(struct kern_nexus_domain_provider *nxdom_prov)
1822 {
1823 int oldref = nxdom_prov->nxdom_prov_refcnt;
1824
1825 SK_LOCK_ASSERT_HELD();
1826
1827 ASSERT(nxdom_prov->nxdom_prov_refcnt != 0);
1828 if (--nxdom_prov->nxdom_prov_refcnt == 0) {
1829 nxdom_prov_free(nxdom_prov);
1830 }
1831
1832 return oldref == 1;
1833 }
1834
/*
 * Drop a reference on the domain provider, acquiring the global
 * Skywalk lock.  Returns TRUE when this released the last reference.
 */
boolean_t
nxdom_prov_release(struct kern_nexus_domain_provider *nxdom_prov)
{
	boolean_t lastref;

	SK_LOCK();
	lastref = nxdom_prov_release_locked(nxdom_prov);
	SK_UNLOCK();

	return lastref;
}
1846
/*
 * Bound *v to [lo, hi]: a value below lo is bumped up to the (itself
 * clamped) default, a value above hi is clamped down to hi.  Returns
 * the resulting value; `msg' is used only for SK_LOG diagnostics.
 */
static uint32_t
nxprov_bound_var(uint32_t *v, uint32_t dflt, uint32_t lo, uint32_t hi,
    const char *msg)
{
#pragma unused(msg)
	uint32_t prev = *v;
	const char *op = NULL;

	/* keep the default itself within [lo, hi] */
	if (dflt < lo) {
		dflt = lo;
	}
	if (dflt > hi) {
		dflt = hi;
	}

	/* too small: bump to the clamped default; too big: clamp to hi */
	if (prev < lo) {
		*v = dflt;
		op = "bump";
	} else if (prev > hi) {
		*v = hi;
		op = "clamp";
	}
#if SK_LOG
	if (op != NULL && msg != NULL) {
		SK_ERR("%s %s to %u (was %u)", op, msg, *v, prev);
	}
#endif /* SK_LOG */
	return *v;
}
1875
/*
 * NXPROV_PARAMS_ADJUST(flag, param): resolve one provider parameter.
 * Uses the caller-requested value when `flag' is set in `req',
 * otherwise the domain default; then bounds it within the domain's
 * [min, max] via nxprov_bound_var().  If bounding reduced the value
 * below what was asked for, the request cannot be satisfied and we
 * fail with ENOMEM.  Expects `req', `nxp0', `nxp', the
 * nxdom_{def,min,max} pointers, `err' and an `error' label in the
 * expansion context.
 */
#define NXPROV_PARAMS_ADJUST(flag, param) do { \
	uint32_t _v0, _v; \
	if (req & (flag)) \
		_v = nxp0->nxp_##param; \
	else \
		_v = NXDOM_DEF(nxdom_def, param); \
	_v0 = _v; \
	if (nxprov_bound_var(&_v, NXDOM_DEF(nxdom_def, param), \
	    NXDOM_MIN(nxdom_min, param), NXDOM_MAX(nxdom_max, param), \
	    "nxp_" #param) < _v0) { \
		err = ENOMEM; \
		goto error; \
	} \
	nxp->nxp_##param = _v; \
} while (0)

/*
 * MUL/ADD: overflow-checked arithmetic.  On overflow, record the
 * source line in `overflowline' for diagnostics and jump to the
 * `error' label in the expansion context.
 */
#define MUL(x, y, z) do { \
	if (__builtin_mul_overflow((x), (y), (z))) { \
		overflowline = __LINE__; \
		goto error; \
	} \
} while (0)

#define ADD(x, y, z) do { \
	if (__builtin_add_overflow((x), (y), (z))) { \
		overflowline = __LINE__; \
		goto error; \
	} \
} while (0)
1905
1906 int
nxprov_params_adjust(struct kern_nexus_domain_provider * nxdom_prov,const uint32_t req,const struct nxprov_params * nxp0,struct nxprov_params * nxp,struct skmem_region_params srp[SKMEM_REGIONS],const struct nxdom * nxdom_def,const struct nxdom * nxdom_min,const struct nxdom * nxdom_max,uint32_t pp_region_config_flags,int (* adjust_fn)(const struct kern_nexus_domain_provider *,const struct nxprov_params *,struct nxprov_adjusted_params *))1907 nxprov_params_adjust(struct kern_nexus_domain_provider *nxdom_prov,
1908 const uint32_t req, const struct nxprov_params *nxp0,
1909 struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS],
1910 const struct nxdom *nxdom_def, const struct nxdom *nxdom_min,
1911 const struct nxdom *nxdom_max, uint32_t pp_region_config_flags,
1912 int (*adjust_fn)(const struct kern_nexus_domain_provider *,
1913 const struct nxprov_params *, struct nxprov_adjusted_params *))
1914 {
1915 uint32_t buf_cnt;
1916 uint32_t stats_size;
1917 uint32_t flowadv_max;
1918 uint32_t nexusadv_size;
1919 uint32_t capabs;
1920 uint32_t tx_rings, rx_rings;
1921 uint32_t alloc_rings = 0, free_rings = 0, ev_rings = 0;
1922 uint32_t tx_slots, rx_slots;
1923 uint32_t alloc_slots = 0, free_slots = 0, ev_slots = 0;
1924 uint32_t buf_size, buf_region_segment_size, max_buffers = 0;
1925 uint32_t tmp1, tmp2, tmp3, tmp4xpipes, tmpsumrings;
1926 uint32_t tmpsumall, tmp4xpipesplusrings;
1927 uint32_t large_buf_size;
1928 int overflowline = 0;
1929 int err = 0;
1930
1931 NXPROV_PARAMS_ADJUST(NXPREQ_TX_RINGS, tx_rings);
1932 NXPROV_PARAMS_ADJUST(NXPREQ_RX_RINGS, rx_rings);
1933 NXPROV_PARAMS_ADJUST(NXPREQ_TX_SLOTS, tx_slots);
1934 NXPROV_PARAMS_ADJUST(NXPREQ_RX_SLOTS, rx_slots);
1935 NXPROV_PARAMS_ADJUST(NXPREQ_BUF_SIZE, buf_size);
1936 NXPROV_PARAMS_ADJUST(NXPREQ_LARGE_BUF_SIZE, large_buf_size);
1937 NXPROV_PARAMS_ADJUST(NXPREQ_STATS_SIZE, stats_size);
1938 NXPROV_PARAMS_ADJUST(NXPREQ_FLOWADV_MAX, flowadv_max);
1939 NXPROV_PARAMS_ADJUST(NXPREQ_NEXUSADV_SIZE, nexusadv_size);
1940 NXPROV_PARAMS_ADJUST(NXPREQ_PIPES, pipes);
1941 NXPROV_PARAMS_ADJUST(NXPREQ_EXTENSIONS, extensions);
1942 NXPROV_PARAMS_ADJUST(NXPREQ_MHINTS, mhints);
1943 NXPROV_PARAMS_ADJUST(NXPREQ_CAPABILITIES, capabilities);
1944 NXPROV_PARAMS_ADJUST(NXPREQ_QMAP, qmap);
1945 NXPROV_PARAMS_ADJUST(NXPREQ_MAX_FRAGS, max_frags);
1946
1947 capabs = NXDOM_DEF(nxdom_def, capabilities);
1948 if (req & NXPREQ_USER_CHANNEL) {
1949 if (nxp->nxp_flags & NXPF_USER_CHANNEL) {
1950 capabs |= NXPCAP_USER_CHANNEL;
1951 } else {
1952 capabs &= ~NXPCAP_USER_CHANNEL;
1953 }
1954 } else {
1955 if (capabs & NXPCAP_USER_CHANNEL) {
1956 nxp->nxp_flags |= NXPF_USER_CHANNEL;
1957 } else {
1958 nxp->nxp_flags &= ~NXPF_USER_CHANNEL;
1959 }
1960 }
1961
1962 if (NXDOM_MIN(nxdom_min, capabilities) != 0 &&
1963 !(capabs & NXDOM_MIN(nxdom_min, capabilities))) {
1964 SK_ERR("%s: caps 0x%b < min 0x%b",
1965 nxdom_prov->nxdom_prov_name, capabs, NXPCAP_BITS,
1966 NXDOM_MIN(nxdom_min, capabilities), NXPCAP_BITS);
1967 err = EINVAL;
1968 goto error;
1969 } else if (NXDOM_MAX(nxdom_max, capabilities) != 0 &&
1970 (capabs & ~NXDOM_MAX(nxdom_max, capabilities))) {
1971 SK_ERR("%s: caps 0x%b > max 0x%b",
1972 nxdom_prov->nxdom_prov_name, capabs, NXPCAP_BITS,
1973 NXDOM_MAX(nxdom_max, capabilities), NXPCAP_BITS);
1974 err = EINVAL;
1975 goto error;
1976 }
1977
1978 stats_size = nxp->nxp_stats_size;
1979 flowadv_max = nxp->nxp_flowadv_max;
1980 nexusadv_size = nxp->nxp_nexusadv_size;
1981 tx_rings = nxp->nxp_tx_rings;
1982 rx_rings = nxp->nxp_rx_rings;
1983 tx_slots = nxp->nxp_tx_slots;
1984 rx_slots = nxp->nxp_rx_slots;
1985 buf_size = nxp->nxp_buf_size;
1986 large_buf_size = nxp->nxp_large_buf_size;
1987 buf_region_segment_size = skmem_usr_buf_seg_size;
1988 ASSERT(pp_region_config_flags & PP_REGION_CONFIG_MD_MAGAZINE_ENABLE);
1989
1990 if (adjust_fn != NULL) {
1991 struct nxprov_adjusted_params adj = {
1992 .adj_md_subtype = &nxp->nxp_md_subtype,
1993 .adj_stats_size = &stats_size,
1994 .adj_flowadv_max = &flowadv_max,
1995 .adj_nexusadv_size = &nexusadv_size,
1996 .adj_caps = &capabs,
1997 .adj_tx_rings = &tx_rings,
1998 .adj_rx_rings = &rx_rings,
1999 .adj_tx_slots = &tx_slots,
2000 .adj_rx_slots = &rx_slots,
2001 .adj_alloc_rings = &alloc_rings,
2002 .adj_free_rings = &free_rings,
2003 .adj_alloc_slots = &alloc_slots,
2004 .adj_free_slots = &free_slots,
2005 .adj_buf_size = &buf_size,
2006 .adj_buf_region_segment_size = &buf_region_segment_size,
2007 .adj_pp_region_config_flags = &pp_region_config_flags,
2008 .adj_max_frags = &nxp->nxp_max_frags,
2009 .adj_event_rings = &ev_rings,
2010 .adj_event_slots = &ev_slots,
2011 .adj_max_buffers = &max_buffers,
2012 .adj_large_buf_size = &large_buf_size,
2013 };
2014 err = adjust_fn(nxdom_prov, nxp, &adj);
2015 if (err != 0) {
2016 goto error;
2017 }
2018
2019 ASSERT(capabs >= NXDOM_MIN(nxdom_min, capabilities));
2020 ASSERT(capabs <= NXDOM_MAX(nxdom_max, capabilities));
2021 }
2022
2023 if (nxp->nxp_max_frags > UINT16_MAX) {
2024 SK_ERR("invalid configuration for max frags %d",
2025 nxp->nxp_max_frags);
2026 err = EINVAL;
2027 }
2028
2029 if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
2030 if (tx_rings != rx_rings) {
2031 SK_ERR("invalid configuration: {rx,tx} rings must be"
2032 "in pairs for user pipe rx_rings(%d) tx_rings(%d)",
2033 rx_rings, tx_rings);
2034 err = EINVAL;
2035 }
2036 } else {
2037 if (nxp->nxp_pipes != 0) {
2038 SK_ERR("invalid configuration: pipe configuration is"
2039 "only valid for user pipe nexus, type %d, pipes %d",
2040 nxp->nxp_type, nxp->nxp_pipes);
2041 err = EINVAL;
2042 }
2043 }
2044 if (err != 0) {
2045 goto error;
2046 }
2047
2048 /* leading and trailing guard pages (if applicable) */
2049 if (sk_guard) {
2050 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = SKMEM_PAGE_SIZE;
2051 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = sk_headguard_sz;
2052 skmem_region_params_config(&srp[SKMEM_REGION_GUARD_HEAD]);
2053 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = SKMEM_PAGE_SIZE;
2054 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = sk_tailguard_sz;
2055 skmem_region_params_config(&srp[SKMEM_REGION_GUARD_TAIL]);
2056 } else {
2057 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = 0;
2058 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = 0;
2059 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = 0;
2060 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = 0;
2061 }
2062
2063 /* update to the adjusted/configured values */
2064 nxp->nxp_buf_size = buf_size;
2065 nxp->nxp_tx_slots = tx_slots;
2066 nxp->nxp_rx_slots = rx_slots;
2067 nxp->nxp_large_buf_size = large_buf_size;
2068
2069 SK_D("nxdom \"%s\" (0x%llx) type %d",
2070 nxdom_prov->nxdom_prov_dom->nxdom_name,
2071 SK_KVA(nxdom_prov->nxdom_prov_dom),
2072 nxdom_prov->nxdom_prov_dom->nxdom_type);
2073 SK_D("nxp \"%s\" (0x%llx) flags 0x%b",
2074 nxp->nxp_name, SK_KVA(nxp), nxp->nxp_flags, NXPF_BITS);
2075 SK_D(" req 0x%b rings %u/%u/%u/%u/%u slots %u/%u/%u/%u/%u buf %u "
2076 "type %u subtype %u stats %u flowadv_max %u nexusadv_size %u "
2077 "capabs 0x%b pipes %u extensions %u max_frags %u headguard %u "
2078 "tailguard %u large_buf %u", req, NXPREQ_BITS, tx_rings, rx_rings,
2079 alloc_rings, free_rings, ev_rings, tx_slots, rx_slots, alloc_slots,
2080 free_slots, ev_slots, nxp->nxp_buf_size, nxp->nxp_md_type,
2081 nxp->nxp_md_subtype, stats_size, flowadv_max, nexusadv_size,
2082 capabs, NXPCAP_BITS, nxp->nxp_pipes, nxp->nxp_extensions,
2083 nxp->nxp_max_frags, srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size *
2084 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt,
2085 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size *
2086 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt,
2087 nxp->nxp_large_buf_size);
2088
2089 /*
2090 * tmp4xpipes = 4 * nxp->nxp_pipes
2091 */
2092 MUL(4, nxp->nxp_pipes, &tmp4xpipes);
2093
2094 /*
2095 * tmp4xpipesplusrings = tx_rings + (4 * nxp->nxp_pipes)
2096 */
2097 VERIFY((tmp4xpipes == 0) || (rx_rings == tx_rings));
2098 ADD(tx_rings, tmp4xpipes, &tmp4xpipesplusrings);
2099
2100 /*
2101 * tmpsumrings = tx_rings + rx_rings + alloc_rings + free_rings +
2102 * ev_rings
2103 */
2104 ADD(tx_rings, rx_rings, &tmpsumrings);
2105 ADD(tmpsumrings, alloc_rings, &tmpsumrings);
2106 ADD(tmpsumrings, free_rings, &tmpsumrings);
2107 ADD(tmpsumrings, ev_rings, &tmpsumrings);
2108
2109 /*
2110 * tmpsumall = (tx_rings + rx_rings +
2111 * alloc_rings + free_rings + ev_rings + (4 * nxp->nxp_pipes))
2112 */
2113 ADD(tmpsumrings, tmp4xpipes, &tmpsumall);
2114
2115 /* possibly increase them to fit user request */
2116 VERIFY(CHANNEL_SCHEMA_SIZE(tmpsumrings) <= UINT32_MAX);
2117 srp[SKMEM_REGION_SCHEMA].srp_r_obj_size =
2118 (uint32_t)CHANNEL_SCHEMA_SIZE(tmpsumrings);
2119 /* worst case is one channel bound to each ring pair */
2120 srp[SKMEM_REGION_SCHEMA].srp_r_obj_cnt = tmp4xpipesplusrings;
2121
2122 skmem_region_params_config(&srp[SKMEM_REGION_SCHEMA]);
2123
2124 srp[SKMEM_REGION_RING].srp_r_obj_size =
2125 sizeof(struct __user_channel_ring);
2126 /* each pipe endpoint needs two tx rings and two rx rings */
2127 srp[SKMEM_REGION_RING].srp_r_obj_cnt = tmpsumall;
2128 skmem_region_params_config(&srp[SKMEM_REGION_RING]);
2129
2130 /*
2131 * For each pipe we only need the buffers for the "real" rings.
2132 * On the other end, the pipe ring dimension may be different from
2133 * the parent port ring dimension. As a compromise, we allocate twice
2134 * the space actually needed if the pipe rings were the same size as
2135 * the parent rings.
2136 *
2137 * buf_cnt = ((4 * nxp->nxp_pipes) + rx_rings) * rx_slots +
2138 * ((4 * nxp->nxp_pipes) + tx_rings) * tx_slots +
2139 * (ev_rings * ev_slots);
2140 */
2141 if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
2142 MUL(tmp4xpipesplusrings, rx_slots, &tmp1);
2143 MUL(tmp4xpipesplusrings, tx_slots, &tmp2);
2144 ASSERT(ev_rings == 0);
2145 tmp3 = 0;
2146 } else {
2147 MUL(rx_rings, rx_slots, &tmp1);
2148 MUL(tx_rings, tx_slots, &tmp2);
2149 MUL(ev_rings, ev_slots, &tmp3);
2150 }
2151 ADD(tmp1, tmp2, &buf_cnt);
2152 ADD(tmp3, buf_cnt, &buf_cnt);
2153
2154 if (nxp->nxp_max_frags > 1) {
2155 pp_region_config_flags |= PP_REGION_CONFIG_BUFLET;
2156 buf_cnt = MIN((((uint32_t)P2ROUNDUP(NX_MAX_AGGR_PKT_SIZE,
2157 nxp->nxp_buf_size) / nxp->nxp_buf_size) * buf_cnt),
2158 (buf_cnt * nxp->nxp_max_frags));
2159 }
2160
2161 if (max_buffers != 0) {
2162 buf_cnt = MIN(max_buffers, buf_cnt);
2163 }
2164
2165 if ((nxp->nxp_flags & NXPF_USER_CHANNEL) == 0) {
2166 pp_region_config_flags |= PP_REGION_CONFIG_KERNEL_ONLY;
2167 }
2168
2169 /* # of metadata objects is same as the # of buffer objects */
2170 ASSERT(buf_region_segment_size != 0);
2171 pp_regions_params_adjust(srp, nxp->nxp_md_type, nxp->nxp_md_subtype,
2172 buf_cnt, (uint16_t)nxp->nxp_max_frags, nxp->nxp_buf_size,
2173 nxp->nxp_large_buf_size, buf_cnt, buf_region_segment_size,
2174 pp_region_config_flags);
2175
2176 /* statistics region size */
2177 if (stats_size != 0) {
2178 srp[SKMEM_REGION_USTATS].srp_r_obj_size = stats_size;
2179 srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 1;
2180 skmem_region_params_config(&srp[SKMEM_REGION_USTATS]);
2181 } else {
2182 srp[SKMEM_REGION_USTATS].srp_r_obj_size = 0;
2183 srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 0;
2184 srp[SKMEM_REGION_USTATS].srp_c_obj_size = 0;
2185 srp[SKMEM_REGION_USTATS].srp_c_obj_cnt = 0;
2186 }
2187
2188 /* flow advisory region size */
2189 if (flowadv_max != 0) {
2190 _CASSERT(NX_FLOWADV_DEFAULT * sizeof(struct __flowadv_entry) <=
2191 SKMEM_MIN_SEG_SIZE);
2192 MUL(sizeof(struct __flowadv_entry), flowadv_max, &tmp1);
2193 srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = tmp1;
2194 srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 1;
2195 skmem_region_params_config(&srp[SKMEM_REGION_FLOWADV]);
2196 } else {
2197 srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = 0;
2198 srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 0;
2199 srp[SKMEM_REGION_FLOWADV].srp_c_obj_size = 0;
2200 srp[SKMEM_REGION_FLOWADV].srp_c_obj_cnt = 0;
2201 }
2202
2203 /* nexus advisory region size */
2204 if (nexusadv_size != 0) {
2205 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = nexusadv_size +
2206 sizeof(struct __kern_nexus_adv_metadata);
2207 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 1;
2208 skmem_region_params_config(&srp[SKMEM_REGION_NEXUSADV]);
2209 } else {
2210 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = 0;
2211 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 0;
2212 srp[SKMEM_REGION_NEXUSADV].srp_c_obj_size = 0;
2213 srp[SKMEM_REGION_NEXUSADV].srp_c_obj_cnt = 0;
2214 }
2215
2216 /* sysctls region is not applicable to nexus */
2217 srp[SKMEM_REGION_SYSCTLS].srp_r_obj_size = 0;
2218 srp[SKMEM_REGION_SYSCTLS].srp_r_obj_cnt = 0;
2219 srp[SKMEM_REGION_SYSCTLS].srp_c_obj_size = 0;
2220 srp[SKMEM_REGION_SYSCTLS].srp_c_obj_cnt = 0;
2221
2222 /*
2223 * Since the tx/alloc/event slots share the same region and cache,
2224 * we will use the same object size for both types of slots.
2225 */
2226 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size =
2227 (MAX(MAX(tx_slots, alloc_slots), ev_slots)) * SLOT_DESC_SZ;
2228 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt = tx_rings + alloc_rings +
2229 ev_rings;
2230 skmem_region_params_config(&srp[SKMEM_REGION_TXAKSD]);
2231
2232 /* USD and KSD objects share the same size and count */
2233 srp[SKMEM_REGION_TXAUSD].srp_r_obj_size =
2234 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size;
2235 srp[SKMEM_REGION_TXAUSD].srp_r_obj_cnt =
2236 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt;
2237 skmem_region_params_config(&srp[SKMEM_REGION_TXAUSD]);
2238
2239 /*
2240 * Since the rx/free slots share the same region and cache,
2241 * we will use the same object size for both types of slots.
2242 */
2243 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size =
2244 MAX(rx_slots, free_slots) * SLOT_DESC_SZ;
2245 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt = rx_rings + free_rings;
2246 skmem_region_params_config(&srp[SKMEM_REGION_RXFKSD]);
2247
2248 /* USD and KSD objects share the same size and count */
2249 srp[SKMEM_REGION_RXFUSD].srp_r_obj_size =
2250 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size;
2251 srp[SKMEM_REGION_RXFUSD].srp_r_obj_cnt =
2252 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt;
2253 skmem_region_params_config(&srp[SKMEM_REGION_RXFUSD]);
2254
2255 /* update these based on the adjusted/configured values */
2256 nxp->nxp_meta_size = srp[SKMEM_REGION_KMD].srp_c_obj_size;
2257 nxp->nxp_stats_size = stats_size;
2258 nxp->nxp_flowadv_max = flowadv_max;
2259 nxp->nxp_nexusadv_size = nexusadv_size;
2260 nxp->nxp_capabilities = capabs;
2261
2262 error:
2263 if (overflowline) {
2264 err = EOVERFLOW;
2265 SK_ERR("math overflow in %s on line %d",
2266 __func__, overflowline);
2267 }
2268 return err;
2269 }
2270
/*
 * These helpers are private to the params-adjust implementation above;
 * undefine them so they cannot leak into the rest of the file.
 */
#undef ADD
#undef MUL
#undef NXPROV_PARAMS_ADJUST
2274
2275 static void
nxprov_detaching_enqueue(struct kern_nexus_domain_provider * nxdom_prov)2276 nxprov_detaching_enqueue(struct kern_nexus_domain_provider *nxdom_prov)
2277 {
2278 SK_LOCK_ASSERT_HELD();
2279
2280 ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
2281 NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
2282
2283 ++nxprov_detaching_cnt;
2284 ASSERT(nxprov_detaching_cnt != 0);
2285 /*
2286 * Insert this to the detaching list; caller is expected to
2287 * have held a reference, most likely the same one that was
2288 * used for the per-domain provider list.
2289 */
2290 STAILQ_INSERT_TAIL(&nxprov_detaching_head, nxdom_prov,
2291 nxdom_prov_detaching_link);
2292 wakeup((caddr_t)&nxprov_detach_wchan);
2293 }
2294
2295 static struct kern_nexus_domain_provider *
nxprov_detaching_dequeue(void)2296 nxprov_detaching_dequeue(void)
2297 {
2298 struct kern_nexus_domain_provider *nxdom_prov;
2299
2300 SK_LOCK_ASSERT_HELD();
2301
2302 nxdom_prov = STAILQ_FIRST(&nxprov_detaching_head);
2303 ASSERT(nxprov_detaching_cnt != 0 || nxdom_prov == NULL);
2304 if (nxdom_prov != NULL) {
2305 ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
2306 NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
2307 ASSERT(nxprov_detaching_cnt != 0);
2308 --nxprov_detaching_cnt;
2309 STAILQ_REMOVE(&nxprov_detaching_head, nxdom_prov,
2310 kern_nexus_domain_provider, nxdom_prov_detaching_link);
2311 }
2312 return nxdom_prov;
2313 }
2314
/*
 * Body of the provider-detacher kernel thread.  It parks itself on
 * nxprov_detach_wchan via msleep0() with nxprov_detacher_cont as the
 * continuation; all real work happens in that continuation, which
 * loops forever and never returns here.
 */
__attribute__((noreturn))
static void
nxprov_detacher(void *v, wait_result_t w)
{
#pragma unused(v, w)
	SK_LOCK();
	/* sleep is entered with sk_lock held; continuation re-runs on wakeup */
	(void) msleep0(&nxprov_detach_wchan, &sk_lock, (PZERO - 1),
	    __func__, 0, nxprov_detacher_cont);
	/*
	 * msleep0() shouldn't have returned as PCATCH was not set;
	 * therefore assert in this case.
	 */
	SK_UNLOCK();
	VERIFY(0);
	/* NOTREACHED */
	__builtin_unreachable();
}
2332
/*
 * Continuation for the detacher thread: drain the detaching list,
 * finalizing each provider, then block again.  Entered from msleep0()
 * with sk_lock held; the inner msleep0() names this same function as
 * its continuation, so the thread restarts at the top of this function
 * on each wakeup and control never falls out of the for(;;) loop
 * (hence no return statement despite the int return type).
 */
static int
nxprov_detacher_cont(int err)
{
#pragma unused(err)
	struct kern_nexus_domain_provider *nxdom_prov;

	for (;;) {
		SK_LOCK_ASSERT_HELD();
		/* nothing pending: park until nxprov_detaching_enqueue() wakes us */
		while (nxprov_detaching_cnt == 0) {
			(void) msleep0(&nxprov_detach_wchan, &sk_lock,
			    (PZERO - 1), __func__, 0, nxprov_detacher_cont);
			/* NOTREACHED */
		}

		ASSERT(STAILQ_FIRST(&nxprov_detaching_head) != NULL);

		nxdom_prov = nxprov_detaching_dequeue();
		if (nxdom_prov != NULL) {
			nxdom_del_provider_final(nxdom_prov);
		}
	}
}
2355