1 /*
2 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #include <skywalk/nexus/upipe/nx_user_pipe.h>
31 #include <skywalk/nexus/kpipe/nx_kernel_pipe.h>
32 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
33 #include <skywalk/nexus/netif/nx_netif.h>
34
35 static STAILQ_HEAD(, nxdom) nexus_domains =
36 STAILQ_HEAD_INITIALIZER(nexus_domains);
37
38 static void nxdom_attach(struct nxdom *);
39 static void nxdom_detach(struct nxdom *);
40 static void nxdom_init(struct nxdom *);
41 static void nxdom_terminate(struct nxdom *);
42 static void nxdom_fini(struct nxdom *);
43 static void nxdom_del_provider_final(struct kern_nexus_domain_provider *);
44
45 static int nxdom_prov_ext_init(struct kern_nexus_domain_provider *);
46 static void nxdom_prov_ext_fini(struct kern_nexus_domain_provider *);
47 static struct kern_nexus_domain_provider *nxdom_prov_alloc(zalloc_flags_t);
48 static void nxdom_prov_free(struct kern_nexus_domain_provider *);
49
50 static uint32_t nxprov_bound_var(uint32_t *, uint32_t, uint32_t, uint32_t,
51 const char *);
52 static void nxprov_detaching_enqueue(struct kern_nexus_domain_provider *);
53 static struct kern_nexus_domain_provider *nxprov_detaching_dequeue(void);
54 static void nxprov_detacher(void *, wait_result_t);
55 static int nxprov_detacher_cont(int);
56
57 static struct nexus_controller *ncd_alloc(zalloc_flags_t);
58 static void ncd_free(struct nexus_controller *);
59
60 static struct nexus_attr *nxa_alloc(zalloc_flags_t);
61 static void nxa_free(struct nexus_attr *);
62
63 static int _kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid,
64 struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
65 uuid_t *nx_if_uuid);
66
67 static SKMEM_TYPE_DEFINE(ncd_zone, struct nexus_controller);
68
69 static SKMEM_TYPE_DEFINE(nxdom_prov_zone, struct kern_nexus_domain_provider);
70
71 static SKMEM_TYPE_DEFINE(nxa_zone, struct nexus_attr);
72
73 static int __nxdom_inited = 0;
74 static STAILQ_HEAD(, kern_nexus_domain_provider) nxprov_detaching_head =
75 STAILQ_HEAD_INITIALIZER(nxprov_detaching_head);
76 static uint32_t nxprov_detaching_cnt;
77 static void *nxprov_detach_wchan; /* wait channel for detacher */
78
79 /*
80 * Array of default nexus domain providers. Initialized once during
81 * domain attach time; no lock is needed to read as they can be treated
82 * as immutables, since default providers imply built-in ones and they
83 * never detach in practice.
84 */
85 struct kern_nexus_domain_provider *nxdom_prov_default[NEXUS_TYPE_MAX];
86
/*
 * Attach and initialize all built-in nexus domains, then spawn the
 * domain-provider detacher thread.  Called once during Skywalk
 * initialization with the SK lock held; asserts it has not run before.
 */
void
nxdom_attach_all(void)
{
	struct nxdom *nxdom;
	thread_t __single tp = THREAD_NULL;

	SK_LOCK_ASSERT_HELD();
	ASSERT(!__nxdom_inited);
	ASSERT(STAILQ_EMPTY(&nexus_domains));

	/* attach whichever domains are compiled into this configuration */
#if CONFIG_NEXUS_FLOWSWITCH
	nxdom_attach(&nx_flowswitch_dom_s);
#endif /* CONFIG_NEXUS_FLOWSWITCH */
#if CONFIG_NEXUS_USER_PIPE
	nxdom_attach(&nx_upipe_dom_s);
#endif /* CONFIG_NEXUS_USER_PIPE */
#if CONFIG_NEXUS_KERNEL_PIPE
	nxdom_attach(&nx_kpipe_dom_s);
#endif /* CONFIG_NEXUS_KERNEL_PIPE */
#if CONFIG_NEXUS_NETIF
	nxdom_attach(&nx_netif_dom_s);
#endif /* CONFIG_NEXUS_NETIF */

	/* ask domains to initialize */
	STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link)
	nxdom_init(nxdom);

	/* detacher thread finalizes domain-provider removal asynchronously */
	if (kernel_thread_start(nxprov_detacher, NULL, &tp) != KERN_SUCCESS) {
		panic_plain("%s: couldn't create detacher thread", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}
	/* drop the extra ref from kernel_thread_start; thread keeps running */
	thread_deallocate(tp);

	__nxdom_inited = 1;
}
123
/*
 * Tear down all attached nexus domains: terminate, finalize, and
 * detach each one in list order.  Safe to call if attach never ran.
 * SK lock must be held.
 */
void
nxdom_detach_all(void)
{
	struct nxdom *nxdom, *tnxdom;

	SK_LOCK_ASSERT_HELD();

	if (__nxdom_inited) {
		/* _SAFE variant: nxdom_detach() unlinks the current element */
		STAILQ_FOREACH_SAFE(nxdom, &nexus_domains, nxdom_link, tnxdom) {
			nxdom_terminate(nxdom);
			nxdom_fini(nxdom);
			nxdom_detach(nxdom);
		}

		/*
		 * TODO: [email protected] -- terminate detacher thread.
		 */

		__nxdom_inited = 0;
	}
	ASSERT(STAILQ_EMPTY(&nexus_domains));
}
146
/*
 * Sanity-check one nexus domain parameter: its default value must lie
 * within the [min, max] range the domain advertises.
 */
#define ASSERT_NXDOM_PARAMS(_dom, _var) do { \
	ASSERT(NXDOM_MIN(_dom, _var) <= NXDOM_MAX(_dom, _var)); \
	ASSERT(NXDOM_DEF(_dom, _var) >= NXDOM_MIN(_dom, _var)); \
	ASSERT(NXDOM_DEF(_dom, _var) <= NXDOM_MAX(_dom, _var)); \
} while (0)
152
/*
 * Attach a nexus domain to the global domain list.  Verifies the
 * domain type is unique and supported, applies boot-arg ring-size
 * overrides (DEVELOPMENT/DEBUG builds only), sanity-checks all domain
 * parameters, and confirms mandatory callbacks exist before linking
 * the domain in and marking it attached.  SK lock must be held.
 */
static void
nxdom_attach(struct nxdom *nxdom)
{
	struct nxdom *nxdom1;

	SK_LOCK_ASSERT_HELD();
	ASSERT(!(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED));

	STAILQ_FOREACH(nxdom1, &nexus_domains, nxdom_link) {
		if (nxdom1->nxdom_type == nxdom->nxdom_type) {
			/* type must be unique; this is a programming error */
			VERIFY(0);
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}

	/* verify this is a valid type */
	switch (nxdom->nxdom_type) {
	case NEXUS_TYPE_USER_PIPE:
	case NEXUS_TYPE_KERNEL_PIPE:
	case NEXUS_TYPE_NET_IF:
	case NEXUS_TYPE_FLOW_SWITCH:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

#if (DEVELOPMENT || DEBUG)
	/*
	 * Override the default ring sizes for flowswitch if configured
	 * via boot-args.  Each nexus provider instance can still change
	 * the values if so desired.  The requested sizes are clamped to
	 * the domain's [min, max] range before becoming the new default.
	 */
	if (nxdom->nxdom_type == NEXUS_TYPE_FLOW_SWITCH) {
		if (sk_txring_sz != 0) {
			if (sk_txring_sz < NXDOM_MIN(nxdom, tx_slots)) {
				sk_txring_sz = NXDOM_MIN(nxdom, tx_slots);
			} else if (sk_txring_sz > NXDOM_MAX(nxdom, tx_slots)) {
				sk_txring_sz = NXDOM_MAX(nxdom, tx_slots);
			}
			NXDOM_DEF(nxdom, tx_slots) = sk_txring_sz;
		}
		if (sk_rxring_sz != 0) {
			if (sk_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) {
				sk_rxring_sz = NXDOM_MIN(nxdom, rx_slots);
			} else if (sk_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) {
				sk_rxring_sz = NXDOM_MAX(nxdom, rx_slots);
			}
			NXDOM_DEF(nxdom, rx_slots) = sk_rxring_sz;
		}
	}
	/*
	 * Override the default ring sizes for netif if configured
	 * via boot-args.  Each nexus provider instance can still change
	 * the values if so desired.
	 */
	if (nxdom->nxdom_type == NEXUS_TYPE_NET_IF) {
		if (sk_net_txring_sz != 0) {
			if (sk_net_txring_sz < NXDOM_MIN(nxdom, tx_slots)) {
				sk_net_txring_sz = NXDOM_MIN(nxdom, tx_slots);
			} else if (sk_net_txring_sz > NXDOM_MAX(nxdom, tx_slots)) {
				sk_net_txring_sz = NXDOM_MAX(nxdom, tx_slots);
			}
			NXDOM_DEF(nxdom, tx_slots) = sk_net_txring_sz;
		}
		if (sk_net_rxring_sz != 0) {
			if (sk_net_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) {
				sk_net_rxring_sz = NXDOM_MIN(nxdom, rx_slots);
			} else if (sk_net_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) {
				sk_net_rxring_sz = NXDOM_MAX(nxdom, rx_slots);
			}
			NXDOM_DEF(nxdom, rx_slots) = sk_net_rxring_sz;
		}
	}

#endif /* DEVELOPMENT || DEBUG */

	/* verify that parameters are sane */
	ASSERT(NXDOM_MAX(nxdom, ports) > 0);
	ASSERT(NXDOM_MAX(nxdom, ports) <= NEXUS_PORT_MAX);
	ASSERT_NXDOM_PARAMS(nxdom, ports);
	ASSERT_NXDOM_PARAMS(nxdom, tx_rings);
	ASSERT_NXDOM_PARAMS(nxdom, rx_rings);
	ASSERT(NXDOM_MAX(nxdom, tx_slots) > 0);
	ASSERT_NXDOM_PARAMS(nxdom, tx_slots);
	ASSERT(NXDOM_MAX(nxdom, rx_slots) > 0);
	ASSERT_NXDOM_PARAMS(nxdom, rx_slots);
	ASSERT_NXDOM_PARAMS(nxdom, buf_size);
	ASSERT_NXDOM_PARAMS(nxdom, meta_size);
	ASSERT_NXDOM_PARAMS(nxdom, pipes);
	ASSERT_NXDOM_PARAMS(nxdom, extensions);

	/* these must exist */
	ASSERT(nxdom->nxdom_bind_port != NULL);
	ASSERT(nxdom->nxdom_unbind_port != NULL);
	ASSERT(nxdom->nxdom_connect != NULL);
	ASSERT(nxdom->nxdom_disconnect != NULL);
	ASSERT(nxdom->nxdom_defunct != NULL);
	ASSERT(nxdom->nxdom_defunct_finalize != NULL);

	STAILQ_INSERT_TAIL(&nexus_domains, nxdom, nxdom_link);
	nxdom->nxdom_flags |= NEXUSDOMF_ATTACHED;
}
260
/* helper macro is only used by nxdom_attach() above; retire it here */
#undef ASSERT_NXDOM_PARAMS
262
263 static void
nxdom_detach(struct nxdom * nxdom)264 nxdom_detach(struct nxdom *nxdom)
265 {
266 SK_LOCK_ASSERT_HELD();
267 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
268
269 STAILQ_REMOVE(&nexus_domains, nxdom, nxdom, nxdom_link);
270 nxdom->nxdom_flags &= ~NEXUSDOMF_ATTACHED;
271 }
272
273 static void
nxdom_init(struct nxdom * nxdom)274 nxdom_init(struct nxdom *nxdom)
275 {
276 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
277
278 SK_LOCK_ASSERT_HELD();
279
280 if (!(nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED)) {
281 if (nxdom->nxdom_init != NULL) {
282 nxdom->nxdom_init(nxdom);
283 }
284 nxdom->nxdom_flags |= NEXUSDOMF_INITIALIZED;
285 }
286 }
287
288 static void
nxdom_terminate(struct nxdom * nxdom)289 nxdom_terminate(struct nxdom *nxdom)
290 {
291 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
292
293 SK_LOCK_ASSERT_HELD();
294
295 if ((nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) &&
296 !(nxdom->nxdom_flags & NEXUSDOMF_TERMINATED)) {
297 if (nxdom->nxdom_terminate != NULL) {
298 nxdom->nxdom_terminate(nxdom);
299 }
300 nxdom->nxdom_flags |= NEXUSDOMF_TERMINATED;
301 }
302 }
303
304 static void
nxdom_fini(struct nxdom * nxdom)305 nxdom_fini(struct nxdom *nxdom)
306 {
307 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
308
309 if (nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) {
310 if (nxdom->nxdom_fini != NULL) {
311 nxdom->nxdom_fini(nxdom);
312 }
313 nxdom->nxdom_flags &= ~NEXUSDOMF_INITIALIZED;
314 }
315 }
316
/*
 * Register a domain provider with a nexus domain.  Provider names must
 * be unique within the domain (EEXIST otherwise).  On success the
 * provider gets a fresh UUID, its init callback is run (if any), it is
 * linked to the domain's provider list (with a reference held for the
 * list), and — if flagged default — installed as the domain's default
 * provider (with another reference).  SK lock must be held.
 */
int
nxdom_prov_add(struct nxdom *nxdom,
    struct kern_nexus_domain_provider *nxdom_prov)
{
	struct kern_nexus_domain_provider *nxprov1;
	nexus_type_t type = nxdom->nxdom_type;
	boolean_t builtin;
	int err = 0;

	SK_LOCK_ASSERT_HELD();
	ASSERT(type < NEXUS_TYPE_MAX);

	/* external (kernel client) providers carry NXDOMPROVF_EXT */
	builtin = !(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT);

	STAILQ_FOREACH(nxprov1, &nxdom->nxdom_prov_head, nxdom_prov_link) {
		/*
		 * We can be a little more strict in the kernel and
		 * avoid namespace collision (even though each domain
		 * provider has a UUID); this also guarantees that
		 * external providers won't conflict with the builtin
		 * ones.
		 */
		if (strbufcmp(nxprov1->nxdom_prov_name, sizeof(nxprov1->nxdom_prov_name),
		    nxdom_prov->nxdom_prov_name, sizeof(nxdom_prov->nxdom_prov_name)) == 0) {
			return EEXIST;
		}
	}

	VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED));
	VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED));

	/* assign identity before init so the callback can see it */
	uuid_generate_random(nxdom_prov->nxdom_prov_uuid);
	nxdom_prov->nxdom_prov_dom = nxdom;
	if (nxdom_prov->nxdom_prov_init != NULL) {
		err = nxdom_prov->nxdom_prov_init(nxdom_prov);
	}

	if (err == 0) {
		nxdom_prov->nxdom_prov_flags |=
		    (NXDOMPROVF_ATTACHED | NXDOMPROVF_INITIALIZED);
		STAILQ_INSERT_TAIL(&nxdom->nxdom_prov_head, nxdom_prov,
		    nxdom_prov_link);
		/* for being in the list */
		nxdom_prov_retain_locked(nxdom_prov);

		if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) {
			/* only a built-in provider may be the default */
			VERIFY(builtin && nxdom_prov_default[type] == NULL);
			nxdom_prov_default[type] = nxdom_prov;
			/* for being in the array */
			nxdom_prov_retain_locked(nxdom_prov);
		}

		SK_D("nxdom_prov %p (%s) dom %s",
		    SK_KVA(nxdom_prov), nxdom_prov->nxdom_prov_name,
		    nxdom->nxdom_name);
	} else {
		/* init failed; undo the tentative identity */
		uuid_clear(nxdom_prov->nxdom_prov_uuid);
		nxdom_prov->nxdom_prov_dom = NULL;
	}

	return err;
}
378
/*
 * Begin removal of a domain provider: unlink it from its domain (and
 * from the default slot, if applicable) and move it to the detaching
 * list, where the detacher thread completes teardown asynchronously.
 * Idempotent while a detach is already pending.  SK lock must be held.
 */
void
nxdom_prov_del(struct kern_nexus_domain_provider *nxdom_prov)
{
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;
	nexus_type_t type = nxdom->nxdom_type;

	SK_LOCK_ASSERT_HELD();
	ASSERT(type < NEXUS_TYPE_MAX);
	ASSERT(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED);

	/* already queued for the detacher; nothing more to do */
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DETACHING) {
		return;
	}

	SK_D("nxdom_prov %p (%s:%s)", SK_KVA(nxdom_prov), nxdom->nxdom_name,
	    nxdom_prov->nxdom_prov_name);

	/* keep the reference around for the detaching list (see below) */
	STAILQ_REMOVE(&nxdom->nxdom_prov_head, nxdom_prov,
	    kern_nexus_domain_provider, nxdom_prov_link);
	nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_ATTACHED;
	nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_DETACHING;

	/* there can only be one default and it must match this one */
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) {
		ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT));
		VERIFY(nxdom_prov_default[type] == nxdom_prov);
		nxdom_prov_default[type] = NULL;
		/*
		 * Release reference held for the array; this must
		 * not be the last reference, as there is still at
		 * least one which we kept for the detaching list.
		 */
		VERIFY(!nxdom_prov_release_locked(nxdom_prov));
	}

	/* add to detaching list and wake up detacher */
	nxprov_detaching_enqueue(nxdom_prov);
}
418
/*
 * Final stage of domain-provider removal, invoked from the detacher
 * thread after the provider is dequeued from the detaching list.
 * Clears the DETACHING flag and drops the detaching-list reference.
 * SK lock must be held.
 */
static void
nxdom_del_provider_final(struct kern_nexus_domain_provider *nxdom_prov)
{
#if SK_LOG
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;
#endif /* SK_LOG */

	SK_LOCK_ASSERT_HELD();

	/* must be mid-detach: no longer attached, still marked detaching */
	ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
	    NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
	ASSERT(nxdom != NULL);

	SK_D("nxdom_prov %p (%s:%s)", SK_KVA(nxdom_prov), nxdom->nxdom_name,
	    nxdom_prov->nxdom_prov_name);

	nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_DETACHING;

	/*
	 * Release reference held for detaching list; if this is the last
	 * reference, the domain provider's nxdom_prov_fini() callback will
	 * be called (if applicable) within the detacher thread's context.
	 * Otherwise, this will occur when the last nexus provider for that
	 * domain provider has been released.
	 */
	(void) nxdom_prov_release_locked(nxdom_prov);
}
446
447 struct nxdom *
nxdom_find(nexus_type_t type)448 nxdom_find(nexus_type_t type)
449 {
450 struct nxdom *nxdom;
451
452 SK_LOCK_ASSERT_HELD();
453 ASSERT(type < NEXUS_TYPE_MAX);
454
455 STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) {
456 if (nxdom->nxdom_type == type) {
457 break;
458 }
459 }
460
461 return nxdom;
462 }
463
464 struct kern_nexus_domain_provider *
nxdom_prov_find(const struct nxdom * nxdom,const char * name)465 nxdom_prov_find(const struct nxdom *nxdom, const char *name)
466 {
467 struct kern_nexus_domain_provider *nxdom_prov = NULL;
468
469 SK_LOCK_ASSERT_HELD();
470
471 if (name != NULL) {
472 STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head,
473 nxdom_prov_link) {
474 if (strlcmp(nxdom_prov->nxdom_prov_name, name,
475 sizeof(nxdom_prov->nxdom_prov_name)) == 0) {
476 break;
477 }
478 }
479 }
480
481 if (nxdom_prov != NULL) {
482 nxdom_prov_retain_locked(nxdom_prov); /* for caller */
483 }
484 return nxdom_prov;
485 }
486
487 struct kern_nexus_domain_provider *
nxdom_prov_find_uuid(const uuid_t dom_prov_uuid)488 nxdom_prov_find_uuid(const uuid_t dom_prov_uuid)
489 {
490 struct kern_nexus_domain_provider *nxdom_prov = NULL;
491 struct nxdom *nxdom;
492
493 SK_LOCK_ASSERT_HELD();
494 ASSERT(dom_prov_uuid != NULL && !uuid_is_null(dom_prov_uuid));
495
496 STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) {
497 STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head,
498 nxdom_prov_link) {
499 ASSERT(!uuid_is_null(nxdom_prov->nxdom_prov_uuid));
500 if (uuid_compare(nxdom_prov->nxdom_prov_uuid,
501 dom_prov_uuid) == 0) {
502 break;
503 }
504 }
505 if (nxdom_prov != NULL) {
506 nxdom_prov_retain_locked(nxdom_prov); /* for caller */
507 break;
508 }
509 }
510
511 return nxdom_prov;
512 }
513
/*
 * Register an external (kernel client) nexus domain provider of the
 * given type.  Only kernel-pipe and netif domains are allowed.  The
 * new provider inherits the default provider's callback vector, with
 * its init/fini redirected through the externally-supplied routines.
 * On success *dom_prov_uuid identifies the provider for later
 * deregistration.  Returns 0, EINVAL on bad arguments/unsupported
 * type, or ENXIO when the domain or its default provider is missing.
 */
errno_t
kern_nexus_register_domain_provider(const nexus_type_t type,
    const nexus_domain_provider_name_t name,
    const struct kern_nexus_domain_provider_init *init,
    const uint32_t init_len, uuid_t *dom_prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	struct nxdom *nxdom;
	errno_t err = 0;

	static_assert(sizeof(*init) == sizeof(nxdom_prov->nxdom_prov_ext));

	if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) {
		return EINVAL;
	}

	uuid_clear(*dom_prov_uuid);

	/* reject missing, short, or version-mismatched init structures */
	if (name == NULL || init == NULL || init_len < sizeof(*init) ||
	    init->nxdpi_version != KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION) {
		return EINVAL;
	}

	/*
	 * init, fini are required.
	 */
	if (init->nxdpi_init == NULL || init->nxdpi_fini == NULL) {
		return EINVAL;
	}

	SK_LOCK();
	/* a built-in default provider for this type must already exist */
	if (nxdom_prov_default[type] == NULL) {
		err = ENXIO;
		goto done;
	}

	nxdom = nxdom_find(type);
	if (nxdom == NULL) {
		err = ENXIO;
		goto done;
	}

	/*
	 * Allow only kernel pipe and netif external domain providers for
	 * now, until we understand the implications and requirements for
	 * supporting other domain types.  For all other types, using
	 * the built-in domain providers and registering nexus should
	 * suffice.
	 */
	if (nxdom->nxdom_type != NEXUS_TYPE_KERNEL_PIPE &&
	    nxdom->nxdom_type != NEXUS_TYPE_NET_IF) {
		err = EINVAL;
		goto done;
	}

	nxdom_prov = nxdom_prov_alloc(Z_WAITOK);

	/*
	 * Point all callback routines to the default provider for this
	 * domain; for nxdom_prov{init,fini}, refer to externally-provided
	 * callback routines, if applicable.
	 */
	bcopy(init, &nxdom_prov->nxdom_prov_ext, sizeof(*init));
	bcopy(&nxdom_prov_default[type]->nxdom_prov_cb,
	    &nxdom_prov->nxdom_prov_cb, sizeof(struct nxdom_prov_cb));
	nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT;
	nxdom_prov->nxdom_prov_init = nxdom_prov_ext_init;
	nxdom_prov->nxdom_prov_fini = nxdom_prov_ext_fini;
	(void) snprintf(nxdom_prov->nxdom_prov_name,
	    sizeof(nxdom_prov->nxdom_prov_name), "%s", name);

	ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT));
	err = nxdom_prov_add(nxdom, nxdom_prov);
	if (err != 0) {
		/* add failed; provider was never linked, free it here */
		nxdom_prov_free(nxdom_prov);
		nxdom_prov = NULL;
	}

done:
	if (nxdom_prov != NULL) {
		ASSERT(err == 0 && !uuid_is_null(nxdom_prov->nxdom_prov_uuid));
		uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
	}
	SK_UNLOCK();

	return err;
}
601
/*
 * Deregister an external domain provider by UUID.  Built-in providers
 * cannot be removed through this interface.  Actual teardown occurs
 * asynchronously via the detacher thread (see nxdom_prov_del()).
 * Returns 0, EINVAL for bad/built-in targets, ENXIO if not found.
 */
errno_t
kern_nexus_deregister_domain_provider(const uuid_t dom_prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	errno_t err = 0;

	if (dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
		return EINVAL;
	}

	SK_LOCK();
	nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid);
	if (nxdom_prov == NULL) {
		err = ENXIO;
		goto done;
	}

	/* don't allow external request for built-in domain providers */
	if (!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT)) {
		err = EINVAL;
		goto done;
	}

	/* schedule this to be deleted */
	nxdom_prov_del(nxdom_prov);
done:
	/* release reference from nxdom_prov_find_uuid */
	if (nxdom_prov != NULL) {
		(void) nxdom_prov_release_locked(nxdom_prov);
	}
	SK_UNLOCK();

	return err;
}
636
637 errno_t
kern_nexus_get_default_domain_provider(const nexus_type_t type,uuid_t * dom_prov_uuid)638 kern_nexus_get_default_domain_provider(const nexus_type_t type,
639 uuid_t *dom_prov_uuid)
640 {
641 struct kern_nexus_domain_provider *nxdom_prov;
642
643 if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) {
644 return EINVAL;
645 }
646
647 uuid_clear(*dom_prov_uuid);
648
649 /* no lock is needed; array is immutable */
650 if ((nxdom_prov = nxdom_prov_default[type]) == NULL) {
651 return ENXIO;
652 }
653
654 uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
655
656 return 0;
657 }
658
659 static int
nxdom_prov_ext_init(struct kern_nexus_domain_provider * nxdom_prov)660 nxdom_prov_ext_init(struct kern_nexus_domain_provider *nxdom_prov)
661 {
662 int err = 0;
663
664 SK_D("initializing %s", nxdom_prov->nxdom_prov_name);
665
666 ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_init != NULL);
667 if ((err = nxdom_prov->nxdom_prov_ext.nxdpi_init(nxdom_prov)) == 0) {
668 nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT_INITED;
669 }
670
671 return err;
672 }
673
674 static void
nxdom_prov_ext_fini(struct kern_nexus_domain_provider * nxdom_prov)675 nxdom_prov_ext_fini(struct kern_nexus_domain_provider *nxdom_prov)
676 {
677 SK_D("destroying %s", nxdom_prov->nxdom_prov_name);
678
679 if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT_INITED) {
680 ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_fini != NULL);
681 nxdom_prov->nxdom_prov_ext.nxdpi_fini(nxdom_prov);
682 nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_EXT_INITED;
683 }
684 }
685
686 static struct nexus_attr *
nxa_alloc(zalloc_flags_t how)687 nxa_alloc(zalloc_flags_t how)
688 {
689 return zalloc_flags(nxa_zone, how | Z_ZERO);
690 }
691
/* Return a nexus_attr to its zone (logs under SK_VERB_MEM). */
static void
nxa_free(struct nexus_attr *nxa)
{
	SK_DF(SK_VERB_MEM, "nxa %p FREE", SK_KVA(nxa));
	zfree(nxa_zone, nxa);
}
698
699 errno_t
kern_nexus_attr_create(nexus_attr_t * nxa)700 kern_nexus_attr_create(nexus_attr_t *nxa)
701 {
702 errno_t err = 0;
703
704 if (nxa == NULL) {
705 err = EINVAL;
706 } else {
707 *nxa = nxa_alloc(Z_WAITOK);
708 }
709 return err;
710 }
711
712 errno_t
kern_nexus_attr_clone(const nexus_attr_t nxa,nexus_attr_t * nnxa)713 kern_nexus_attr_clone(const nexus_attr_t nxa, nexus_attr_t *nnxa)
714 {
715 errno_t err = 0;
716
717 if (nnxa == NULL) {
718 err = EINVAL;
719 } else {
720 err = kern_nexus_attr_create(nnxa);
721 if (err == 0 && nxa != NULL) {
722 ASSERT(*nnxa != NULL);
723 bcopy(nxa, *nnxa, sizeof(**nnxa));
724 }
725 }
726 return err;
727 }
728
/*
 * Set one attribute on a nexus_attr; thin wrapper around the shared
 * __nexus_attr_set() validator/setter.
 */
errno_t
kern_nexus_attr_set(const nexus_attr_t nxa,
    const nexus_attr_type_t type, const uint64_t value)
{
	return __nexus_attr_set(nxa, type, value);
}
735
/*
 * Read one attribute from a nexus_attr into *value; thin wrapper
 * around the shared __nexus_attr_get() accessor.
 */
errno_t
kern_nexus_attr_get(nexus_attr_t nxa, const nexus_attr_type_t type,
    uint64_t *value)
{
	return __nexus_attr_get(nxa, type, value);
}
742
/* Free an attribute object created by kern_nexus_attr_create(). */
void
kern_nexus_attr_destroy(nexus_attr_t nxa)
{
	nxa_free(nxa);
}
748
749 static struct nexus_controller *
ncd_alloc(zalloc_flags_t how)750 ncd_alloc(zalloc_flags_t how)
751 {
752 return zalloc_flags(ncd_zone, how | Z_ZERO);
753 }
754
/* Return a nexus_controller to its zone (logs under SK_VERB_MEM). */
static void
ncd_free(struct nexus_controller *ncd)
{
	SK_DF(SK_VERB_MEM, "ncd %p FREE", SK_KVA(ncd));
	zfree(ncd_zone, ncd);
}
761
/* Return the globally-shared kernel nexus controller. */
nexus_controller_t
kern_nexus_shared_controller(void)
{
	return &kernnxctl;
}
767
/*
 * Create a new nexus controller backed by a freshly-created kernel
 * nxctl.  On any failure both the nxctl and the controller wrapper
 * are torn down and *ncd is left NULL.
 */
errno_t
kern_nexus_controller_create(nexus_controller_t *ncd)
{
	struct nxctl *nxctl = NULL;
	uuid_t nxctl_uuid;
	errno_t err = 0;

	uuid_generate_random(nxctl_uuid);

	if (ncd == NULL) {
		err = EINVAL;
		goto done;
	} else {
		*ncd = NULL;
	}

	nxctl = nxctl_create(kernproc, NULL, nxctl_uuid, &err);
	if (nxctl == NULL) {
		ASSERT(err != 0);
		goto done;
	}

	*ncd = ncd_alloc(Z_WAITOK);
	(*ncd)->ncd_nxctl = nxctl; /* ref from nxctl_create */

done:
	if (err != 0) {
		/* unwind whatever was constructed before the failure */
		if (nxctl != NULL) {
			nxctl_dtor(nxctl);
			nxctl = NULL;
		}
		if (ncd != NULL && *ncd != NULL) {
			ncd_free(*ncd);
			*ncd = NULL;
		}
	}

	return err;
}
807
/*
 * True when a callback pair is asymmetric, i.e. exactly one of the
 * two is NULL.  XOR alone captures this: when it is true exactly one
 * side is NULL, so the "not both NULL" conjunct of the original form
 * was redundant.
 */
#define NXPI_INVALID_CB_PAIRS(cb1, cb2) \
	((init->nxpi_##cb1 == NULL) ^ (init->nxpi_##cb2 == NULL))
811
/*
 * Validate a caller-supplied kern_nexus_provider_init structure.
 * init may be NULL (no callbacks at all).  init_len must match the
 * structure size implied by nxpi_version, and the callback set must
 * satisfy the per-version and per-domain-type rules checked below.
 * Returns 0 when valid, EINVAL otherwise.
 */
static errno_t
nexus_controller_register_provider_validate_init_params(
    const struct kern_nexus_provider_init *init, uint32_t init_len,
    nexus_type_t nxdom_type)
{
	errno_t err = 0;
	struct kern_nexus_netif_provider_init *netif_init;

	/* nxpi_version must lead the struct so it can be read first */
	static_assert(__builtin_offsetof(struct kern_nexus_provider_init, nxpi_version) == 0);
	static_assert(sizeof(init->nxpi_version) == sizeof(uint32_t));

	if (init == NULL) {
		return 0;
	}

	/* need at least the version field to interpret the rest */
	if (init_len < sizeof(uint32_t)) {
		return EINVAL;
	}

	switch (init->nxpi_version) {
	case KERN_NEXUS_PROVIDER_VERSION_1:
		if (init_len != sizeof(struct kern_nexus_provider_init)) {
			err = EINVAL;
			break;
		}
		ASSERT(init->nxpi_rx_sync_packets == NULL);
		ASSERT(init->nxpi_tx_sync_packets == NULL);
		/*
		 * sync_{tx,rx} callbacks are required; the rest of the
		 * callback pairs are optional, but must be symmetrical.
		 */
		if (init->nxpi_sync_tx == NULL || init->nxpi_sync_rx == NULL ||
		    init->nxpi_pre_connect == NULL ||
		    init->nxpi_connected == NULL ||
		    init->nxpi_pre_disconnect == NULL ||
		    init->nxpi_disconnected == NULL ||
		    NXPI_INVALID_CB_PAIRS(ring_init, ring_fini) ||
		    NXPI_INVALID_CB_PAIRS(slot_init, slot_fini)) {
			err = EINVAL;
			break;
		}
		/*
		 * Tx doorbell interface is only supported for netif and
		 * Tx doorbell is mandatory for netif
		 */
		if (((init->nxpi_tx_doorbell != NULL) &&
		    (nxdom_type != NEXUS_TYPE_NET_IF)) ||
		    ((nxdom_type == NEXUS_TYPE_NET_IF) &&
		    (init->nxpi_tx_doorbell == NULL))) {
			err = EINVAL;
			break;
		}
		/*
		 * Capabilities configuration interface is only supported for
		 * netif.
		 */
		if ((init->nxpi_config_capab != NULL) &&
		    (nxdom_type != NEXUS_TYPE_NET_IF)) {
			err = EINVAL;
			break;
		}
		break;

	case KERN_NEXUS_PROVIDER_VERSION_NETIF:
		/* netif logical-link variant: netif domain only */
		if (init_len != sizeof(struct kern_nexus_netif_provider_init)) {
			err = EINVAL;
			break;
		}
		if (nxdom_type != NEXUS_TYPE_NET_IF) {
			err = EINVAL;
			break;
		}
		netif_init =
		    __DECONST(struct kern_nexus_netif_provider_init *, init);
		/* every callback is mandatory in this version */
		if (netif_init->nxnpi_pre_connect == NULL ||
		    netif_init->nxnpi_connected == NULL ||
		    netif_init->nxnpi_pre_disconnect == NULL ||
		    netif_init->nxnpi_disconnected == NULL ||
		    netif_init->nxnpi_qset_init == NULL ||
		    netif_init->nxnpi_qset_fini == NULL ||
		    netif_init->nxnpi_queue_init == NULL ||
		    netif_init->nxnpi_queue_fini == NULL ||
		    netif_init->nxnpi_tx_qset_notify == NULL ||
		    netif_init->nxnpi_config_capab == NULL) {
			err = EINVAL;
			break;
		}
		break;

	default:
		err = EINVAL;
		break;
	}
	return err;
}
907
/*
 * Register a nexus provider under the domain provider identified by
 * dom_prov_uuid.  init (optional) supplies provider callbacks and is
 * validated against the domain type; nxa (optional) carries creation
 * attributes.  On success *prov_uuid identifies the new provider.
 */
errno_t
kern_nexus_controller_register_provider(const nexus_controller_t ncd,
    const uuid_t dom_prov_uuid, const nexus_name_t name,
    const struct kern_nexus_provider_init *init, uint32_t init_len,
    const nexus_attr_t nxa, uuid_t *prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	struct kern_nexus_provider *nxprov = NULL;
	nexus_type_t nxdom_type;
	struct nxprov_reg reg;
	struct nxctl *nxctl;
	errno_t err = 0;

	if (prov_uuid == NULL) {
		return EINVAL;
	}

	uuid_clear(*prov_uuid);

	if (ncd == NULL ||
	    dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
		return EINVAL;
	}

	/* lock order: nxctl lock first, then the SK lock */
	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	SK_LOCK();
	nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid);
	if (nxdom_prov == NULL) {
		SK_UNLOCK();
		err = ENXIO;
		goto done;
	}

	nxdom_type = nxdom_prov->nxdom_prov_dom->nxdom_type;
	ASSERT(nxdom_type < NEXUS_TYPE_MAX);

	err = nexus_controller_register_provider_validate_init_params(init,
	    init_len, nxdom_type);
	if (err != 0) {
		SK_UNLOCK();
		err = EINVAL;
		goto done;
	}

	if ((err = __nexus_provider_reg_prepare(&reg,
	    __unsafe_null_terminated_from_indexable(name), nxdom_type, nxa)) != 0) {
		SK_UNLOCK();
		goto done;
	}

	/* the netif provider version implies logical-link mode */
	if (init && init->nxpi_version == KERN_NEXUS_PROVIDER_VERSION_NETIF) {
		reg.nxpreg_params.nxp_flags |= NXPF_NETIF_LLINK;
	}

	/* callee will hold reference on nxdom_prov upon success */
	if ((nxprov = nxprov_create_kern(nxctl, nxdom_prov, &reg,
	    init, &err)) == NULL) {
		SK_UNLOCK();
		ASSERT(err != 0);
		goto done;
	}
	SK_UNLOCK();

	uuid_copy(*prov_uuid, nxprov->nxprov_uuid);

done:
	SK_LOCK_ASSERT_NOTHELD();
	NXCTL_UNLOCK(nxctl);

	/* on failure, close the provider we just created (if any) */
	if (err != 0 && nxprov != NULL) {
		err = nxprov_close(nxprov, FALSE);
	}

	/* release extra ref from nxprov_create_kern */
	if (nxprov != NULL) {
		nxprov_release(nxprov);
	}
	/* release extra ref from nxdom_prov_find_uuid */
	if (nxdom_prov != NULL) {
		(void) nxdom_prov_release(nxdom_prov);
	}

	return err;
}
993
994 #undef NXPI_INVALID_CB_PAIRS
995
996 errno_t
kern_nexus_controller_deregister_provider(const nexus_controller_t ncd,const uuid_t prov_uuid)997 kern_nexus_controller_deregister_provider(const nexus_controller_t ncd,
998 const uuid_t prov_uuid)
999 {
1000 errno_t err;
1001
1002 if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid)) {
1003 err = EINVAL;
1004 } else {
1005 struct nxctl *nxctl = ncd->ncd_nxctl;
1006 NXCTL_LOCK(nxctl);
1007 err = nxprov_destroy(nxctl, prov_uuid);
1008 NXCTL_UNLOCK(nxctl);
1009 }
1010 return err;
1011 }
1012
/*
 * Instantiate a nexus from a registered provider.  The optional
 * packet-buffer pools must either be absent or identical (separate
 * RX and TX pools are rejected here).  nx_ctx and nx_ctx_release
 * supply the caller's opaque context and its destructor.  On success
 * *nx_uuid identifies the new nexus instance.
 */
errno_t
kern_nexus_controller_alloc_provider_instance(const nexus_controller_t ncd,
    const uuid_t prov_uuid, const void *nx_ctx,
    nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid,
    const struct kern_nexus_init *init)
{
	struct kern_nexus *nx = NULL;
	struct nxctl *nxctl;
	errno_t err = 0;

	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
	    nx_uuid == NULL || init == NULL ||
	    init->nxi_version != KERN_NEXUS_CURRENT_VERSION ||
	    (init->nxi_rx_pbufpool != NULL &&
	    init->nxi_rx_pbufpool != init->nxi_tx_pbufpool)) {
		err = EINVAL;
		goto done;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_UNDEFINED, nx_ctx,
	    nx_ctx_release, init->nxi_tx_pbufpool, init->nxi_rx_pbufpool, &err);
	NXCTL_UNLOCK(nxctl);
	if (nx == NULL) {
		ASSERT(err != 0);
		goto done;
	}
	ASSERT(err == 0);
	uuid_copy(*nx_uuid, nx->nx_uuid);

done:
	/* release extra ref from nx_create */
	if (nx != NULL) {
		(void) nx_release(nx);
	}

	return err;
}
1052
/*
 * Create a netif nexus instance bound to a freshly allocated embryonic
 * ifnet.  On success, *pifp holds the ifnet reference obtained from
 * ifnet_allocate_extended(); the caller is expected to retain that
 * reference until its ifnet detach callback fires.  On any failure,
 * everything created here (nexus instance and ifnet) is undone.
 */
errno_t
kern_nexus_controller_alloc_net_provider_instance(
    const nexus_controller_t ncd, const uuid_t prov_uuid, const void *nx_ctx,
    nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid,
    const struct kern_nexus_net_init *init, struct ifnet **pifp)
{
	struct kern_nexus *nx = NULL;
	struct ifnet *__single ifp = NULL;
	struct nxctl *nxctl;
	boolean_t nxctl_locked = FALSE;
	errno_t err = 0;

	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
	    nx_uuid == NULL || init == NULL ||
	    init->nxneti_version != KERN_NEXUS_NET_CURRENT_VERSION ||
	    init->nxneti_eparams == NULL || pifp == NULL) {
		err = EINVAL;
		goto done;
	}

	/*
	 * Skywalk native interface doesn't support legacy model.
	 */
	if ((init->nxneti_eparams->start != NULL) ||
	    (init->nxneti_eparams->flags & IFNET_INIT_LEGACY) ||
	    (init->nxneti_eparams->flags & IFNET_INIT_INPUT_POLL)) {
		err = EINVAL;
		goto done;
	}

	/* create an embryonic ifnet */
	err = ifnet_allocate_extended(init->nxneti_eparams, &ifp);
	if (err != 0) {
		goto done;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	nxctl_locked = TRUE;

	nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_NET_IF, nx_ctx,
	    nx_ctx_release, init->nxneti_tx_pbufpool, init->nxneti_rx_pbufpool,
	    &err);
	if (nx == NULL) {
		ASSERT(err != 0);
		goto done;
	}

	/* logical-link providers must supply a default llink config */
	if (NX_LLINK_PROV(nx)) {
		if (init->nxneti_llink == NULL) {
			SK_ERR("logical link configuration required");
			err = EINVAL;
			goto done;
		}
		err = nx_netif_default_llink_config(NX_NETIF_PRIVATE(nx),
		    init->nxneti_llink);
		if (err != 0) {
			goto done;
		}
	}

	/* prepare this ifnet instance if needed */
	if (init->nxneti_prepare != NULL) {
		err = init->nxneti_prepare(nx, ifp);
		if (err != 0) {
			goto done;
		}
	}

	/* attach embryonic ifnet to nexus */
	/*
	 * XXX -fbounds-safety: Update this once __counted_by_or_null is
	 * available (rdar://75598414)
	 */
	err = _kern_nexus_ifattach(nxctl, nx->nx_uuid, ifp,
	    __unsafe_forge_bidi_indexable(unsigned char *, NULL, sizeof(uuid_t)),
	    FALSE, NULL);

	if (err != 0) {
		goto done;
	}

	/* and finalize the ifnet attach */
	/* drop the nxctl lock before calling ifnet_attach() */
	ASSERT(nxctl_locked);
	NXCTL_UNLOCK(nxctl);
	nxctl_locked = FALSE;

	err = ifnet_attach(ifp, init->nxneti_lladdr);
	if (err != 0) {
		goto done;
	}

	ASSERT(err == 0);
	/*
	 * Return ifnet reference held by ifnet_allocate_extended();
	 * caller is expected to retain this reference until its ifnet
	 * detach callback is called.
	 */
	*pifp = ifp;
	uuid_copy(*nx_uuid, nx->nx_uuid);

done:
	if (nxctl_locked) {
		NXCTL_UNLOCK(nxctl);
	}

	/* release extra ref from nx_create */
	if (nx != NULL) {
		SK_LOCK();
		if (err != 0) {
			/* on failure, also close the instance we created */
			(void) nx_close(nx, TRUE);
		}
		(void) nx_release_locked(nx);
		SK_UNLOCK();
	}
	/* on failure, drop the ifnet ref from ifnet_allocate_extended() */
	if (err != 0 && ifp != NULL) {
		ifnet_release(ifp);
	}

	return err;
}
1174
1175 errno_t
kern_nexus_controller_free_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid)1176 kern_nexus_controller_free_provider_instance(const nexus_controller_t ncd,
1177 const uuid_t nx_uuid)
1178 {
1179 errno_t err;
1180
1181 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1182 err = EINVAL;
1183 } else {
1184 struct nxctl *nxctl = ncd->ncd_nxctl;
1185 NXCTL_LOCK(nxctl);
1186 err = nx_destroy(nxctl, nx_uuid);
1187 NXCTL_UNLOCK(nxctl);
1188 }
1189 return err;
1190 }
1191
1192 errno_t
kern_nexus_controller_bind_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid,nexus_port_t * port,const pid_t pid,const uuid_t exec_uuid,const void * key,const uint32_t key_len,const uint32_t bind_flags)1193 kern_nexus_controller_bind_provider_instance(const nexus_controller_t ncd,
1194 const uuid_t nx_uuid, nexus_port_t *port, const pid_t pid,
1195 const uuid_t exec_uuid, const void *key, const uint32_t key_len,
1196 const uint32_t bind_flags)
1197 {
1198 struct nx_bind_req nbr;
1199 struct sockopt sopt;
1200 struct nxctl *nxctl;
1201 int err = 0;
1202
1203 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1204 port == NULL) {
1205 return EINVAL;
1206 }
1207
1208 __nexus_bind_req_prepare(&nbr, nx_uuid, *port, pid, exec_uuid,
1209 key, key_len, bind_flags);
1210
1211 bzero(&sopt, sizeof(sopt));
1212 sopt.sopt_dir = SOPT_SET;
1213 sopt.sopt_name = NXOPT_NEXUS_BIND;
1214 sopt.sopt_val = (user_addr_t)&nbr;
1215 sopt.sopt_valsize = sizeof(nbr);
1216 sopt.sopt_p = kernproc;
1217
1218 nxctl = ncd->ncd_nxctl;
1219 NXCTL_LOCK(nxctl);
1220 err = nxctl_set_opt(nxctl, &sopt);
1221 NXCTL_UNLOCK(nxctl);
1222
1223 if (err == 0) {
1224 *port = nbr.nb_port;
1225 }
1226
1227 return err;
1228 }
1229
1230 errno_t
kern_nexus_controller_unbind_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid,const nexus_port_t port)1231 kern_nexus_controller_unbind_provider_instance(const nexus_controller_t ncd,
1232 const uuid_t nx_uuid, const nexus_port_t port)
1233 {
1234 struct nx_unbind_req nbu;
1235 struct sockopt sopt;
1236 struct nxctl *nxctl;
1237 int err = 0;
1238
1239 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1240 return EINVAL;
1241 }
1242
1243 __nexus_unbind_req_prepare(&nbu, nx_uuid, port);
1244
1245 bzero(&sopt, sizeof(sopt));
1246 sopt.sopt_dir = SOPT_SET;
1247 sopt.sopt_name = NXOPT_NEXUS_UNBIND;
1248 sopt.sopt_val = (user_addr_t)&nbu;
1249 sopt.sopt_valsize = sizeof(nbu);
1250 sopt.sopt_p = kernproc;
1251
1252 nxctl = ncd->ncd_nxctl;
1253 NXCTL_LOCK(nxctl);
1254 err = nxctl_set_opt(nxctl, &sopt);
1255 NXCTL_UNLOCK(nxctl);
1256
1257 return err;
1258 }
1259
1260 errno_t
kern_nexus_controller_read_provider_attr(const nexus_controller_t ncd,const uuid_t prov_uuid,nexus_attr_t nxa)1261 kern_nexus_controller_read_provider_attr(const nexus_controller_t ncd,
1262 const uuid_t prov_uuid, nexus_attr_t nxa)
1263 {
1264 struct nxprov_reg_ent nre;
1265 struct nxprov_params *p = &nre.npre_prov_params;
1266 struct sockopt sopt;
1267 struct nxctl *nxctl;
1268 int err = 0;
1269
1270 if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
1271 nxa == NULL) {
1272 return EINVAL;
1273 }
1274
1275 bzero(&nre, sizeof(nre));
1276 bcopy(prov_uuid, nre.npre_prov_uuid, sizeof(uuid_t));
1277
1278 bzero(&sopt, sizeof(sopt));
1279 sopt.sopt_dir = SOPT_GET;
1280 sopt.sopt_name = NXOPT_NEXUS_PROV_ENTRY;
1281 sopt.sopt_val = (user_addr_t)&nre;
1282 sopt.sopt_valsize = sizeof(nre);
1283 sopt.sopt_p = kernproc;
1284
1285 nxctl = ncd->ncd_nxctl;
1286 NXCTL_LOCK(nxctl);
1287 err = nxctl_get_opt(nxctl, &sopt);
1288 NXCTL_UNLOCK(nxctl);
1289
1290 if (err == 0) {
1291 __nexus_attr_from_params(nxa, p);
1292 }
1293
1294 return err;
1295 }
1296
1297 void
kern_nexus_controller_destroy(nexus_controller_t ncd)1298 kern_nexus_controller_destroy(nexus_controller_t ncd)
1299 {
1300 struct nxctl *nxctl;
1301
1302 if (ncd == NULL) {
1303 return;
1304 }
1305
1306 nxctl = ncd->ncd_nxctl;
1307 ASSERT(nxctl != NULL);
1308 ncd->ncd_nxctl = NULL;
1309 nxctl_dtor(nxctl);
1310
1311 ncd_free(ncd);
1312 }
1313
/*
 * Return the opaque client context pointer stored in the nexus
 * (the nx_ctx supplied when the instance was created).
 */
void *
kern_nexus_get_context(const kern_nexus_t nx)
{
	return nx->nx_ctx;
}
1319
/*
 * Stop a nexus instance; simply wraps nx_stop() with the global
 * Skywalk lock.
 */
void
kern_nexus_stop(const kern_nexus_t nx)
{
	SK_LOCK();
	nx_stop(nx);
	SK_UNLOCK();
}
1327
1328 errno_t
kern_nexus_get_pbufpool(const kern_nexus_t nx,kern_pbufpool_t * ptx_pp,kern_pbufpool_t * prx_pp)1329 kern_nexus_get_pbufpool(const kern_nexus_t nx, kern_pbufpool_t *ptx_pp,
1330 kern_pbufpool_t *prx_pp)
1331 {
1332 kern_pbufpool_t __single tpp = NULL, rpp = NULL;
1333 int err = 0;
1334
1335 if (ptx_pp == NULL && prx_pp == NULL) {
1336 return EINVAL;
1337 }
1338
1339 if (NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info == NULL) {
1340 err = ENOTSUP;
1341 } else {
1342 err = NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info(nx, &tpp, &rpp);
1343 }
1344
1345 if (ptx_pp != NULL) {
1346 *ptx_pp = tpp;
1347 }
1348 if (prx_pp != NULL) {
1349 *prx_pp = rpp;
1350 }
1351
1352 return err;
1353 }
1354
/*
 * Common attach helper: issues an NXCFG_CMD_ATTACH config request on
 * the nexus identified by nx_uuid.  Exactly one attachment target must
 * be supplied: either an ifnet (ifp) or another nexus by UUID
 * (nx_uuid_attachee, optionally its host ring via `host').  On success,
 * the resulting attachment UUID is copied out via nx_if_uuid (if
 * non-NULL).  Caller must hold the nxctl lock.
 */
static int
_kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid,
    struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
    uuid_t *nx_if_uuid)
{
	struct nx_cfg_req ncr;
	struct nx_spec_req nsr;
	struct sockopt sopt;
	int err = 0;

	NXCTL_LOCK_ASSERT_HELD(nxctl);

	if (nx_uuid == NULL || uuid_is_null(nx_uuid)) {
		return EINVAL;
	}

	bzero(&nsr, sizeof(nsr));
	if (ifp != NULL) {
		/* ifnet form: the UUID form must not also be given */
		if (nx_uuid_attachee != NULL) {
			return EINVAL;
		}

		nsr.nsr_flags = NXSPECREQ_IFP;
		nsr.nsr_ifp = ifp;
	} else {
		/* UUID form: attach another nexus instance */
		if (nx_uuid_attachee == NULL) {
			return EINVAL;
		}

		nsr.nsr_flags = NXSPECREQ_UUID;
		if (host) {
			nsr.nsr_flags |= NXSPECREQ_HOST;
		}

		uuid_copy(nsr.nsr_uuid, nx_uuid_attachee);
	}
	__nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_ATTACH,
	    &nsr, sizeof(nsr));

	bzero(&sopt, sizeof(sopt));
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_name = NXOPT_NEXUS_CONFIG;
	sopt.sopt_val = (user_addr_t)&ncr;
	sopt.sopt_valsize = sizeof(ncr);
	sopt.sopt_p = kernproc;

	err = nxctl_set_opt(nxctl, &sopt);
	/* copy out the attachment UUID generated by the request */
	if (err == 0 && nx_if_uuid != NULL) {
		uuid_copy(*nx_if_uuid, nsr.nsr_if_uuid);
	}

	return err;
}
1408
1409 int
kern_nexus_ifattach(nexus_controller_t ncd,const uuid_t nx_uuid,struct ifnet * ifp,const uuid_t nx_uuid_attachee,boolean_t host,uuid_t * nx_if_uuid)1410 kern_nexus_ifattach(nexus_controller_t ncd, const uuid_t nx_uuid,
1411 struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
1412 uuid_t *nx_if_uuid)
1413 {
1414 struct nxctl *nxctl;
1415 int err = 0;
1416
1417 if (ncd == NULL) {
1418 return EINVAL;
1419 }
1420
1421 nxctl = ncd->ncd_nxctl;
1422 ASSERT(nxctl != NULL);
1423 NXCTL_LOCK(nxctl);
1424 err = _kern_nexus_ifattach(nxctl, nx_uuid, ifp, nx_uuid_attachee,
1425 host, nx_if_uuid);
1426 NXCTL_UNLOCK(nxctl);
1427
1428 return err;
1429 }
1430
1431 int
kern_nexus_ifdetach(const nexus_controller_t ncd,const uuid_t nx_uuid,const uuid_t nx_if_uuid)1432 kern_nexus_ifdetach(const nexus_controller_t ncd,
1433 const uuid_t nx_uuid, const uuid_t nx_if_uuid)
1434 {
1435 struct nx_cfg_req ncr;
1436 struct nx_spec_req nsr;
1437 struct sockopt sopt;
1438 struct nxctl *nxctl;
1439 int err = 0;
1440
1441 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1442 nx_if_uuid == NULL || uuid_is_null(nx_if_uuid)) {
1443 return EINVAL;
1444 }
1445
1446 bzero(&nsr, sizeof(nsr));
1447 uuid_copy(nsr.nsr_if_uuid, nx_if_uuid);
1448
1449 __nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_DETACH,
1450 &nsr, sizeof(nsr));
1451
1452 bzero(&sopt, sizeof(sopt));
1453 sopt.sopt_dir = SOPT_SET;
1454 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1455 sopt.sopt_val = (user_addr_t)&ncr;
1456 sopt.sopt_valsize = sizeof(ncr);
1457 sopt.sopt_p = kernproc;
1458
1459 nxctl = ncd->ncd_nxctl;
1460 NXCTL_LOCK(nxctl);
1461 err = nxctl_set_opt(nxctl, &sopt);
1462 NXCTL_UNLOCK(nxctl);
1463
1464 return err;
1465 }
1466
1467 int
kern_nexus_get_netif_instance(struct ifnet * ifp,uuid_t nx_uuid)1468 kern_nexus_get_netif_instance(struct ifnet *ifp, uuid_t nx_uuid)
1469 {
1470 struct nexus_netif_adapter *if_na;
1471 int err = 0;
1472
1473 SK_LOCK();
1474 if_na = ifp->if_na;
1475 if (if_na != NULL) {
1476 uuid_copy(nx_uuid, if_na->nifna_up.na_nx->nx_uuid);
1477 } else {
1478 err = ENXIO;
1479 }
1480 SK_UNLOCK();
1481 if (err != 0) {
1482 uuid_clear(nx_uuid);
1483 }
1484
1485 return err;
1486 }
1487
1488 int
kern_nexus_get_flowswitch_instance(struct ifnet * ifp,uuid_t nx_uuid)1489 kern_nexus_get_flowswitch_instance(struct ifnet *ifp, uuid_t nx_uuid)
1490 {
1491 struct nexus_netif_adapter *if_na;
1492 struct nx_flowswitch *fsw = NULL;
1493 int err = 0;
1494
1495 SK_LOCK();
1496 if_na = ifp->if_na;
1497 if (if_na != NULL) {
1498 fsw = ifp->if_na->nifna_netif->nif_fsw;
1499 }
1500 if (fsw != NULL) {
1501 uuid_copy(nx_uuid, fsw->fsw_nx->nx_uuid);
1502 } else {
1503 err = ENXIO;
1504 }
1505 SK_UNLOCK();
1506 if (err != 0) {
1507 uuid_clear(nx_uuid);
1508 }
1509
1510 return err;
1511 }
1512
/*
 * kern_nexus_walktree() callback: register the flowswitch netagent
 * for this nexus; the return value of nx_fsw_netagent_add() is
 * intentionally discarded here.
 */
static void
kern_nexus_netagent_add(struct kern_nexus *nx, void *arg0)
{
#pragma unused(arg0)
	nx_fsw_netagent_add(nx);
}
1519
/*
 * kern_nexus_walktree() callback: deregister the flowswitch netagent
 * for this nexus; the return value is intentionally discarded here.
 */
static void
kern_nexus_netagent_remove(struct kern_nexus *nx, void *arg0)
{
#pragma unused(arg0)
	nx_fsw_netagent_remove(nx);
}
1526
/*
 * kern_nexus_walktree() callback: refresh the flowswitch netagent
 * registration for this nexus.
 */
static void
kern_nexus_netagent_update(struct kern_nexus *nx, void *arg0)
{
#pragma unused(arg0)
	nx_fsw_netagent_update(nx);
}
1533
/*
 * Walk all nexus instances and register their flowswitch netagents.
 */
void
kern_nexus_register_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_add, NULL, FALSE);
}
1539
/*
 * Walk all nexus instances and deregister their flowswitch netagents.
 */
void
kern_nexus_deregister_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_remove, NULL, FALSE);
}
1545
/*
 * Walk all nexus instances and refresh their flowswitch netagent
 * registrations.
 */
void
kern_nexus_update_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_update, NULL, FALSE);
}
1551
1552 static int
_interface_add_remove_netagent(struct ifnet * ifp,bool add)1553 _interface_add_remove_netagent(struct ifnet *ifp, bool add)
1554 {
1555 struct nexus_netif_adapter *if_na;
1556 int err = ENXIO;
1557
1558 SK_LOCK();
1559 if_na = ifp->if_na;
1560 if (if_na != NULL) {
1561 struct nx_flowswitch *fsw;
1562
1563 fsw = if_na->nifna_netif->nif_fsw;
1564 if (fsw != NULL) {
1565 if (add) {
1566 err = nx_fsw_netagent_add(fsw->fsw_nx);
1567 } else {
1568 err = nx_fsw_netagent_remove(fsw->fsw_nx);
1569 }
1570 }
1571 }
1572 SK_UNLOCK();
1573 return err;
1574 }
1575
/*
 * Register the netagent for the flowswitch attached to ifp, if any;
 * returns ENXIO when no flowswitch attachment exists.
 */
int
kern_nexus_interface_add_netagent(struct ifnet *ifp)
{
	return _interface_add_remove_netagent(ifp, true);
}
1581
/*
 * Deregister the netagent for the flowswitch attached to ifp, if any;
 * returns ENXIO when no flowswitch attachment exists.
 */
int
kern_nexus_interface_remove_netagent(struct ifnet *ifp)
{
	return _interface_add_remove_netagent(ifp, false);
}
1587
1588 int
kern_nexus_set_netif_input_tbr_rate(struct ifnet * ifp,uint64_t rate)1589 kern_nexus_set_netif_input_tbr_rate(struct ifnet *ifp, uint64_t rate)
1590 {
1591 /* input tbr is only functional with active netif attachment */
1592 if (ifp->if_na == NULL) {
1593 if (rate != 0) {
1594 return EINVAL;
1595 } else {
1596 return 0;
1597 }
1598 }
1599
1600 ifp->if_na->nifna_netif->nif_input_rate = rate;
1601 return 0;
1602 }
1603
1604 int
kern_nexus_set_if_netem_params(const nexus_controller_t ncd,const uuid_t nx_uuid,void * data,size_t data_len)1605 kern_nexus_set_if_netem_params(const nexus_controller_t ncd,
1606 const uuid_t nx_uuid, void *data, size_t data_len)
1607 {
1608 struct nx_cfg_req ncr;
1609 struct sockopt sopt;
1610 struct nxctl *nxctl;
1611 int err = 0;
1612
1613 if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1614 data_len < sizeof(struct if_netem_params)) {
1615 return EINVAL;
1616 }
1617
1618 __nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_NETEM,
1619 data, data_len);
1620 bzero(&sopt, sizeof(sopt));
1621 sopt.sopt_dir = SOPT_SET;
1622 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1623 sopt.sopt_val = (user_addr_t)&ncr;
1624 sopt.sopt_valsize = sizeof(ncr);
1625 sopt.sopt_p = kernproc;
1626
1627 nxctl = ncd->ncd_nxctl;
1628 NXCTL_LOCK(nxctl);
1629 err = nxctl_set_opt(nxctl, &sopt);
1630 NXCTL_UNLOCK(nxctl);
1631
1632 return err;
1633 }
1634
1635 static int
_kern_nexus_flow_config(const nexus_controller_t ncd,const uuid_t nx_uuid,const nxcfg_cmd_t cmd,void * data,size_t data_len)1636 _kern_nexus_flow_config(const nexus_controller_t ncd, const uuid_t nx_uuid,
1637 const nxcfg_cmd_t cmd, void *data, size_t data_len)
1638 {
1639 struct nx_cfg_req ncr;
1640 struct sockopt sopt;
1641 struct nxctl *nxctl;
1642 int err = 0;
1643
1644 if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1645 data_len < sizeof(struct nx_flow_req)) {
1646 return EINVAL;
1647 }
1648
1649 __nexus_config_req_prepare(&ncr, nx_uuid, cmd, data, data_len);
1650
1651 bzero(&sopt, sizeof(sopt));
1652 sopt.sopt_dir = SOPT_SET;
1653 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1654 sopt.sopt_val = (user_addr_t)&ncr;
1655 sopt.sopt_valsize = sizeof(ncr);
1656 sopt.sopt_p = kernproc;
1657
1658 nxctl = ncd->ncd_nxctl;
1659 NXCTL_LOCK(nxctl);
1660 err = nxctl_set_opt(nxctl, &sopt);
1661 NXCTL_UNLOCK(nxctl);
1662
1663 return err;
1664 }
1665
/*
 * Add a flow on the given nexus; data must point to a struct
 * nx_flow_req (data_len >= sizeof(struct nx_flow_req)).
 */
int
kern_nexus_flow_add(const nexus_controller_t ncd, const uuid_t nx_uuid,
    void *data, size_t data_len)
{
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_ADD, data,
	           data_len);
}
1673
/*
 * Delete a flow on the given nexus; data must point to a struct
 * nx_flow_req (data_len >= sizeof(struct nx_flow_req)).
 */
int
kern_nexus_flow_del(const nexus_controller_t ncd, const uuid_t nx_uuid,
    void *data, size_t data_len)
{
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_DEL, data,
	           data_len);
}
1681
/*
 * Allocate a zeroed domain provider from nxdom_prov_zone.
 * Caller must hold the Skywalk lock.
 */
static struct kern_nexus_domain_provider *
nxdom_prov_alloc(zalloc_flags_t how)
{
	SK_LOCK_ASSERT_HELD();

	return zalloc_flags(nxdom_prov_zone, how | Z_ZERO);
}
1689
/*
 * Final teardown of a domain provider once its refcount drops to zero.
 * Must be called with the Skywalk lock held, on a provider that is
 * neither attached nor in the middle of detaching.  Only instances
 * marked NXDOMPROVF_EXT were allocated from nxdom_prov_zone and get
 * returned to it; others are merely reset in place (presumably
 * statically allocated elsewhere — note the "FREE" vs "DESTROY" log).
 */
static void
nxdom_prov_free(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK_ASSERT_HELD();

	ASSERT(nxdom_prov->nxdom_prov_refcnt == 0);
	ASSERT(!(nxdom_prov->nxdom_prov_flags &
	    (NXDOMPROVF_ATTACHED | NXDOMPROVF_DETACHING)));

	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED) {
		/*
		 * Tell the domain provider that we're done with this
		 * instance, and it is now free to go away.
		 */
		if (nxdom_prov->nxdom_prov_fini != NULL) {
			nxdom_prov->nxdom_prov_fini(nxdom_prov);
		}
		nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_INITIALIZED;
	}
	uuid_clear(nxdom_prov->nxdom_prov_uuid);
	nxdom_prov->nxdom_prov_dom = NULL;

	SK_DF(SK_VERB_MEM, "nxdom_prov %p %s", SK_KVA(nxdom_prov),
	    ((nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) ?
	    "FREE" : "DESTROY"));
	/* only externally allocated providers came from the zone */
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) {
		zfree(nxdom_prov_zone, nxdom_prov);
	}
}
1719
/*
 * Take a reference on a domain provider; caller holds the Skywalk
 * lock.  The post-increment assert catches refcount wraparound.
 */
void
nxdom_prov_retain_locked(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK_ASSERT_HELD();

	nxdom_prov->nxdom_prov_refcnt++;
	ASSERT(nxdom_prov->nxdom_prov_refcnt != 0);
}
1728
/*
 * Take a reference on a domain provider, acquiring the Skywalk lock.
 */
void
nxdom_prov_retain(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK();
	nxdom_prov_retain_locked(nxdom_prov);
	SK_UNLOCK();
}
1736
/*
 * Default parameter-adjustment path used when a domain provider does
 * not supply its own nxdom_prov_params callback: adjust against the
 * provider's own domain for the default, minimum and maximum bounds,
 * with no custom adjust function.
 */
static int
nxdom_prov_params_default(struct kern_nexus_domain_provider *nxdom_prov,
    const uint32_t req, const struct nxprov_params *nxp0,
    struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS],
    uint32_t pp_region_config_flags)
{
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;

	return nxprov_params_adjust(nxdom_prov, req, nxp0, nxp, srp,
	           nxdom, nxdom, nxdom, pp_region_config_flags, NULL);
}
1748
/*
 * Validate and normalize a provider registration request: populate
 * *nxp with defaults derived from the registration (name, type,
 * metadata type/subtype, flags with `oflags' forced on), seed the
 * region parameters from the system defaults, then let the domain
 * provider adjust everything via its params callback (or the default
 * adjuster).  Returns 0 on success or an errno from the adjuster.
 */
int
nxdom_prov_validate_params(struct kern_nexus_domain_provider *nxdom_prov,
    const struct nxprov_reg *reg, struct nxprov_params *nxp,
    struct skmem_region_params srp[SKMEM_REGIONS], const uint32_t oflags,
    uint32_t pp_region_config_flags)
{
	const struct nxprov_params *nxp0 = &reg->nxpreg_params;
	const uint32_t req = reg->nxpreg_requested;
	int i, err = 0;

	/* caller has already screened version and name length */
	ASSERT(reg->nxpreg_version == NXPROV_REG_CURRENT_VERSION &&
	    nxp0->nxp_namelen != 0 &&
	    nxp0->nxp_namelen <= sizeof(nexus_name_t));

	/* fill in with default values and let the nexus override them */
	bzero(nxp, sizeof(*nxp));
	bcopy(&nxp0->nxp_name, &nxp->nxp_name, sizeof(nxp->nxp_name));
	nxp->nxp_name[sizeof(nxp->nxp_name) - 1] = '\0';
	nxp->nxp_namelen = nxp0->nxp_namelen;
	nxp->nxp_type = nxp0->nxp_type;
	nxp->nxp_md_type = nxdom_prov->nxdom_prov_dom->nxdom_md_type;
	nxp->nxp_md_subtype = nxdom_prov->nxdom_prov_dom->nxdom_md_subtype;
	nxp->nxp_flags = (nxp0->nxp_flags & NXPF_MASK);
	nxp->nxp_flags |= oflags; /* override */
	nxp->nxp_format = nxp0->nxp_format;
	nxp->nxp_ifindex = nxp0->nxp_ifindex;
	nxp->nxp_reject_on_close = nxp0->nxp_reject_on_close;

	/* inherit default region parameters */
	for (i = 0; i < SKMEM_REGIONS; i++) {
		srp[i] = *skmem_get_default(i);
	}

	/* domain-specific adjustment, or the generic default one */
	if (nxdom_prov->nxdom_prov_params != NULL) {
		err = nxdom_prov->nxdom_prov_params(nxdom_prov, req, nxp0,
		    nxp, srp, pp_region_config_flags);
	} else {
		err = nxdom_prov_params_default(nxdom_prov, req, nxp0,
		    nxp, srp, pp_region_config_flags);
	}
	return err;
}
1791
1792 boolean_t
nxdom_prov_release_locked(struct kern_nexus_domain_provider * nxdom_prov)1793 nxdom_prov_release_locked(struct kern_nexus_domain_provider *nxdom_prov)
1794 {
1795 int oldref = nxdom_prov->nxdom_prov_refcnt;
1796
1797 SK_LOCK_ASSERT_HELD();
1798
1799 ASSERT(nxdom_prov->nxdom_prov_refcnt != 0);
1800 if (--nxdom_prov->nxdom_prov_refcnt == 0) {
1801 nxdom_prov_free(nxdom_prov);
1802 }
1803
1804 return oldref == 1;
1805 }
1806
/*
 * Drop a reference on a domain provider, acquiring the Skywalk lock;
 * returns TRUE when this was the last reference.
 */
boolean_t
nxdom_prov_release(struct kern_nexus_domain_provider *nxdom_prov)
{
	boolean_t lastref;

	SK_LOCK();
	lastref = nxdom_prov_release_locked(nxdom_prov);
	SK_UNLOCK();

	return lastref;
}
1818
/*
 * Force *v into the range [lo, hi]: a value below `lo' is bumped up to
 * the (itself range-clamped) default `dflt', and a value above `hi' is
 * clamped down to `hi'.  Returns the resulting value of *v; `msg' is
 * used only for logging when SK_LOG is enabled.
 */
static uint32_t
nxprov_bound_var(uint32_t *v, uint32_t dflt, uint32_t lo, uint32_t hi,
    const char *msg)
{
#pragma unused(msg)
	const uint32_t prev = *v;
	const char *action = NULL;

	/* clamp the default itself into [lo, hi] first */
	if (dflt < lo) {
		dflt = lo;
	}
	if (dflt > hi) {
		dflt = hi;
	}

	if (prev < lo) {
		*v = dflt;
		action = "bump";
	} else if (prev > hi) {
		*v = hi;
		action = "clamp";
	}
#if SK_LOG
	if (action != NULL && msg != NULL) {
		SK_ERR("%s %s to %u (was %u)", action, msg, *v, prev);
	}
#endif /* SK_LOG */
	return *v;
}
1847
/*
 * NXPROV_PARAMS_ADJUST(flag, param): select the caller-requested value
 * of nxp_<param> when `req' has <flag> set, otherwise the domain
 * default; then bound it with nxprov_bound_var() against the domain's
 * [min, max].  If the bounded result came out smaller than the chosen
 * value (_v0), fail with ENOMEM via the enclosing function's `error'
 * label.  Relies on `req', `nxp0', `nxp', `nxdom_def', `nxdom_min',
 * `nxdom_max' and `err' being in scope at the expansion site.
 */
#define NXPROV_PARAMS_ADJUST(flag, param) do { \
	uint32_t _v0, _v; \
	if (req & (flag)) \
	        _v = nxp0->nxp_##param; \
	else \
	        _v = NXDOM_DEF(nxdom_def, param); \
	_v0 = _v; \
	if (nxprov_bound_var(&_v, NXDOM_DEF(nxdom_def, param), \
	    NXDOM_MIN(nxdom_min, param), NXDOM_MAX(nxdom_max, param), \
	    "nxp_" #param) < _v0) { \
	        err = ENOMEM; \
	        goto error; \
	} \
	nxp->nxp_##param = _v; \
} while (0)
1863
/*
 * MUL(x, y, z): *z = x * y with overflow checking; on overflow, record
 * the offending line in `overflowline' and jump to the enclosing
 * function's `error' label (used inside nxprov_params_adjust()).
 */
#define MUL(x, y, z) do { \
	if (__builtin_mul_overflow((x), (y), (z))) { \
	        overflowline = __LINE__; \
	        goto error; \
	} \
} while (0)
1870
/*
 * ADD(x, y, z): *z = x + y with overflow checking; on overflow, record
 * the offending line in `overflowline' and jump to the enclosing
 * function's `error' label (used inside nxprov_params_adjust()).
 */
#define ADD(x, y, z) do { \
	if (__builtin_add_overflow((x), (y), (z))) { \
	        overflowline = __LINE__; \
	        goto error; \
	} \
} while (0)
1877
1878 int
nxprov_params_adjust(struct kern_nexus_domain_provider * nxdom_prov,const uint32_t req,const struct nxprov_params * nxp0,struct nxprov_params * nxp,struct skmem_region_params srp[SKMEM_REGIONS],const struct nxdom * nxdom_def,const struct nxdom * nxdom_min,const struct nxdom * nxdom_max,uint32_t pp_region_config_flags,int (* adjust_fn)(const struct kern_nexus_domain_provider *,const struct nxprov_params *,struct nxprov_adjusted_params *))1879 nxprov_params_adjust(struct kern_nexus_domain_provider *nxdom_prov,
1880 const uint32_t req, const struct nxprov_params *nxp0,
1881 struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS],
1882 const struct nxdom *nxdom_def, const struct nxdom *nxdom_min,
1883 const struct nxdom *nxdom_max, uint32_t pp_region_config_flags,
1884 int (*adjust_fn)(const struct kern_nexus_domain_provider *,
1885 const struct nxprov_params *, struct nxprov_adjusted_params *))
1886 {
1887 uint32_t buf_cnt;
1888 uint32_t stats_size;
1889 uint32_t flowadv_max;
1890 uint32_t nexusadv_size;
1891 uint32_t capabs;
1892 uint32_t tx_rings, rx_rings;
1893 uint32_t alloc_rings = 0, free_rings = 0, ev_rings = 0;
1894 uint32_t tx_slots, rx_slots;
1895 uint32_t alloc_slots = 0, free_slots = 0, ev_slots = 0;
1896 uint32_t buf_size, buf_region_segment_size, max_buffers = 0;
1897 uint32_t tmp1, tmp2, tmp3, tmp4xpipes, tmpsumrings;
1898 uint32_t tmpsumall, tmp4xpipesplusrings;
1899 uint32_t large_buf_size;
1900 int overflowline = 0;
1901 int err = 0;
1902
1903 NXPROV_PARAMS_ADJUST(NXPREQ_TX_RINGS, tx_rings);
1904 NXPROV_PARAMS_ADJUST(NXPREQ_RX_RINGS, rx_rings);
1905 NXPROV_PARAMS_ADJUST(NXPREQ_TX_SLOTS, tx_slots);
1906 NXPROV_PARAMS_ADJUST(NXPREQ_RX_SLOTS, rx_slots);
1907 NXPROV_PARAMS_ADJUST(NXPREQ_BUF_SIZE, buf_size);
1908 NXPROV_PARAMS_ADJUST(NXPREQ_LARGE_BUF_SIZE, large_buf_size);
1909 NXPROV_PARAMS_ADJUST(NXPREQ_STATS_SIZE, stats_size);
1910 NXPROV_PARAMS_ADJUST(NXPREQ_FLOWADV_MAX, flowadv_max);
1911 NXPROV_PARAMS_ADJUST(NXPREQ_NEXUSADV_SIZE, nexusadv_size);
1912 NXPROV_PARAMS_ADJUST(NXPREQ_PIPES, pipes);
1913 NXPROV_PARAMS_ADJUST(NXPREQ_EXTENSIONS, extensions);
1914 NXPROV_PARAMS_ADJUST(NXPREQ_MHINTS, mhints);
1915 NXPROV_PARAMS_ADJUST(NXPREQ_CAPABILITIES, capabilities);
1916 NXPROV_PARAMS_ADJUST(NXPREQ_QMAP, qmap);
1917 NXPROV_PARAMS_ADJUST(NXPREQ_MAX_FRAGS, max_frags);
1918
1919 capabs = NXDOM_DEF(nxdom_def, capabilities);
1920 if (req & NXPREQ_USER_CHANNEL) {
1921 if (nxp->nxp_flags & NXPF_USER_CHANNEL) {
1922 capabs |= NXPCAP_USER_CHANNEL;
1923 } else {
1924 capabs &= ~NXPCAP_USER_CHANNEL;
1925 }
1926 } else {
1927 if (capabs & NXPCAP_USER_CHANNEL) {
1928 nxp->nxp_flags |= NXPF_USER_CHANNEL;
1929 } else {
1930 nxp->nxp_flags &= ~NXPF_USER_CHANNEL;
1931 }
1932 }
1933
1934 if (NXDOM_MIN(nxdom_min, capabilities) != 0 &&
1935 !(capabs & NXDOM_MIN(nxdom_min, capabilities))) {
1936 SK_ERR("%s: caps 0x%x < min 0x%x", nxdom_prov->nxdom_prov_name,
1937 capabs, NXDOM_MIN(nxdom_min, capabilities));
1938 err = EINVAL;
1939 goto error;
1940 } else if (NXDOM_MAX(nxdom_max, capabilities) != 0 &&
1941 (capabs & ~NXDOM_MAX(nxdom_max, capabilities))) {
1942 SK_ERR("%s: caps 0x%x > max 0x%x", nxdom_prov->nxdom_prov_name,
1943 capabs, NXDOM_MAX(nxdom_max, capabilities));
1944 err = EINVAL;
1945 goto error;
1946 }
1947
1948 stats_size = nxp->nxp_stats_size;
1949 flowadv_max = nxp->nxp_flowadv_max;
1950 nexusadv_size = nxp->nxp_nexusadv_size;
1951 tx_rings = nxp->nxp_tx_rings;
1952 rx_rings = nxp->nxp_rx_rings;
1953 tx_slots = nxp->nxp_tx_slots;
1954 rx_slots = nxp->nxp_rx_slots;
1955 buf_size = nxp->nxp_buf_size;
1956 large_buf_size = nxp->nxp_large_buf_size;
1957 buf_region_segment_size = skmem_usr_buf_seg_size;
1958 ASSERT(pp_region_config_flags & PP_REGION_CONFIG_MD_MAGAZINE_ENABLE);
1959
1960 if (adjust_fn != NULL) {
1961 struct nxprov_adjusted_params adj = {
1962 .adj_md_subtype = &nxp->nxp_md_subtype,
1963 .adj_stats_size = &stats_size,
1964 .adj_flowadv_max = &flowadv_max,
1965 .adj_nexusadv_size = &nexusadv_size,
1966 .adj_caps = &capabs,
1967 .adj_tx_rings = &tx_rings,
1968 .adj_rx_rings = &rx_rings,
1969 .adj_tx_slots = &tx_slots,
1970 .adj_rx_slots = &rx_slots,
1971 .adj_alloc_rings = &alloc_rings,
1972 .adj_free_rings = &free_rings,
1973 .adj_alloc_slots = &alloc_slots,
1974 .adj_free_slots = &free_slots,
1975 .adj_buf_size = &buf_size,
1976 .adj_buf_region_segment_size = &buf_region_segment_size,
1977 .adj_pp_region_config_flags = &pp_region_config_flags,
1978 .adj_max_frags = &nxp->nxp_max_frags,
1979 .adj_event_rings = &ev_rings,
1980 .adj_event_slots = &ev_slots,
1981 .adj_max_buffers = &max_buffers,
1982 .adj_large_buf_size = &large_buf_size,
1983 };
1984 err = adjust_fn(nxdom_prov, nxp, &adj);
1985 if (err != 0) {
1986 goto error;
1987 }
1988
1989 ASSERT(capabs >= NXDOM_MIN(nxdom_min, capabilities));
1990 ASSERT(capabs <= NXDOM_MAX(nxdom_max, capabilities));
1991 }
1992
1993 if (nxp->nxp_max_frags > UINT16_MAX) {
1994 SK_ERR("invalid configuration for max frags %d",
1995 nxp->nxp_max_frags);
1996 err = EINVAL;
1997 }
1998
1999 if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
2000 if (tx_rings != rx_rings) {
2001 SK_ERR("invalid configuration: {rx,tx} rings must be"
2002 "in pairs for user pipe rx_rings(%d) tx_rings(%d)",
2003 rx_rings, tx_rings);
2004 err = EINVAL;
2005 }
2006 } else {
2007 if (nxp->nxp_pipes != 0) {
2008 SK_ERR("invalid configuration: pipe configuration is"
2009 "only valid for user pipe nexus, type %d, pipes %d",
2010 nxp->nxp_type, nxp->nxp_pipes);
2011 err = EINVAL;
2012 }
2013 }
2014 if (err != 0) {
2015 goto error;
2016 }
2017
2018 /* leading and trailing guard pages (if applicable) */
2019 if (sk_guard) {
2020 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = SKMEM_PAGE_SIZE;
2021 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = sk_headguard_sz;
2022 skmem_region_params_config(&srp[SKMEM_REGION_GUARD_HEAD]);
2023 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = SKMEM_PAGE_SIZE;
2024 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = sk_tailguard_sz;
2025 skmem_region_params_config(&srp[SKMEM_REGION_GUARD_TAIL]);
2026 } else {
2027 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = 0;
2028 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = 0;
2029 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = 0;
2030 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = 0;
2031 }
2032
2033 /* update to the adjusted/configured values */
2034 nxp->nxp_buf_size = buf_size;
2035 nxp->nxp_tx_slots = tx_slots;
2036 nxp->nxp_rx_slots = rx_slots;
2037 nxp->nxp_large_buf_size = large_buf_size;
2038
2039 SK_D("nxdom \"%s\" (%p) type %d",
2040 nxdom_prov->nxdom_prov_dom->nxdom_name,
2041 SK_KVA(nxdom_prov->nxdom_prov_dom),
2042 nxdom_prov->nxdom_prov_dom->nxdom_type);
2043 SK_D("nxp \"%s\" (%p) flags 0x%x",
2044 nxp->nxp_name, SK_KVA(nxp), nxp->nxp_flags);
2045 SK_D(" req 0x%x rings %u/%u/%u/%u/%u slots %u/%u/%u/%u/%u buf %u "
2046 "type %u subtype %u stats %u flowadv_max %u nexusadv_size %u "
2047 "capabs 0x%x pipes %u extensions %u max_frags %u headguard %u "
2048 "tailguard %u large_buf %u", req, tx_rings, rx_rings,
2049 alloc_rings, free_rings, ev_rings, tx_slots, rx_slots, alloc_slots,
2050 free_slots, ev_slots, nxp->nxp_buf_size, nxp->nxp_md_type,
2051 nxp->nxp_md_subtype, stats_size, flowadv_max, nexusadv_size,
2052 capabs, nxp->nxp_pipes, nxp->nxp_extensions, nxp->nxp_max_frags,
2053 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size *
2054 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt,
2055 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size *
2056 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt,
2057 nxp->nxp_large_buf_size);
2058
2059 /*
2060 * tmp4xpipes = 4 * nxp->nxp_pipes
2061 */
2062 MUL(4, nxp->nxp_pipes, &tmp4xpipes);
2063
2064 /*
2065 * tmp4xpipesplusrings = tx_rings + (4 * nxp->nxp_pipes)
2066 */
2067 VERIFY((tmp4xpipes == 0) || (rx_rings == tx_rings));
2068 ADD(tx_rings, tmp4xpipes, &tmp4xpipesplusrings);
2069
2070 /*
2071 * tmpsumrings = tx_rings + rx_rings + alloc_rings + free_rings +
2072 * ev_rings
2073 */
2074 ADD(tx_rings, rx_rings, &tmpsumrings);
2075 ADD(tmpsumrings, alloc_rings, &tmpsumrings);
2076 ADD(tmpsumrings, free_rings, &tmpsumrings);
2077 ADD(tmpsumrings, ev_rings, &tmpsumrings);
2078
2079 /*
2080 * tmpsumall = (tx_rings + rx_rings +
2081 * alloc_rings + free_rings + ev_rings + (4 * nxp->nxp_pipes))
2082 */
2083 ADD(tmpsumrings, tmp4xpipes, &tmpsumall);
2084
2085 /* possibly increase them to fit user request */
2086 VERIFY(CHANNEL_SCHEMA_SIZE(tmpsumrings) <= UINT32_MAX);
2087 srp[SKMEM_REGION_SCHEMA].srp_r_obj_size =
2088 (uint32_t)CHANNEL_SCHEMA_SIZE(tmpsumrings);
2089 /* worst case is one channel bound to each ring pair */
2090 srp[SKMEM_REGION_SCHEMA].srp_r_obj_cnt = tmp4xpipesplusrings;
2091
2092 skmem_region_params_config(&srp[SKMEM_REGION_SCHEMA]);
2093
2094 srp[SKMEM_REGION_RING].srp_r_obj_size =
2095 sizeof(struct __user_channel_ring);
2096 /* each pipe endpoint needs two tx rings and two rx rings */
2097 srp[SKMEM_REGION_RING].srp_r_obj_cnt = tmpsumall;
2098 skmem_region_params_config(&srp[SKMEM_REGION_RING]);
2099
2100 /*
2101 * For each pipe we only need the buffers for the "real" rings.
2102 * On the other end, the pipe ring dimension may be different from
2103 * the parent port ring dimension. As a compromise, we allocate twice
2104 * the space actually needed if the pipe rings were the same size as
2105 * the parent rings.
2106 *
2107 * buf_cnt = ((4 * nxp->nxp_pipes) + rx_rings) * rx_slots +
2108 * ((4 * nxp->nxp_pipes) + tx_rings) * tx_slots +
2109 * (ev_rings * ev_slots);
2110 */
2111 if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
2112 MUL(tmp4xpipesplusrings, rx_slots, &tmp1);
2113 MUL(tmp4xpipesplusrings, tx_slots, &tmp2);
2114 ASSERT(ev_rings == 0);
2115 tmp3 = 0;
2116 } else {
2117 MUL(rx_rings, rx_slots, &tmp1);
2118 MUL(tx_rings, tx_slots, &tmp2);
2119 MUL(ev_rings, ev_slots, &tmp3);
2120 }
2121 ADD(tmp1, tmp2, &buf_cnt);
2122 ADD(tmp3, buf_cnt, &buf_cnt);
2123
2124 if (nxp->nxp_max_frags > 1) {
2125 pp_region_config_flags |= PP_REGION_CONFIG_BUFLET;
2126 buf_cnt = MIN((((uint32_t)P2ROUNDUP(NX_MAX_AGGR_PKT_SIZE,
2127 nxp->nxp_buf_size) / nxp->nxp_buf_size) * buf_cnt),
2128 (buf_cnt * nxp->nxp_max_frags));
2129 }
2130
2131 if (max_buffers != 0) {
2132 buf_cnt = MIN(max_buffers, buf_cnt);
2133 }
2134
2135 if ((nxp->nxp_flags & NXPF_USER_CHANNEL) == 0) {
2136 pp_region_config_flags |= PP_REGION_CONFIG_KERNEL_ONLY;
2137 }
2138
2139 /* # of metadata objects is same as the # of buffer objects */
2140 ASSERT(buf_region_segment_size != 0);
2141 pp_regions_params_adjust(srp, nxp->nxp_md_type, nxp->nxp_md_subtype,
2142 buf_cnt, (uint16_t)nxp->nxp_max_frags, nxp->nxp_buf_size,
2143 nxp->nxp_large_buf_size, buf_cnt, buf_region_segment_size,
2144 pp_region_config_flags);
2145
2146 /* statistics region size */
2147 if (stats_size != 0) {
2148 srp[SKMEM_REGION_USTATS].srp_r_obj_size = stats_size;
2149 srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 1;
2150 skmem_region_params_config(&srp[SKMEM_REGION_USTATS]);
2151 } else {
2152 srp[SKMEM_REGION_USTATS].srp_r_obj_size = 0;
2153 srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 0;
2154 srp[SKMEM_REGION_USTATS].srp_c_obj_size = 0;
2155 srp[SKMEM_REGION_USTATS].srp_c_obj_cnt = 0;
2156 }
2157
2158 /* flow advisory region size */
2159 if (flowadv_max != 0) {
2160 static_assert(NX_FLOWADV_DEFAULT * sizeof(struct __flowadv_entry) <= SKMEM_MIN_SEG_SIZE);
2161 MUL(sizeof(struct __flowadv_entry), flowadv_max, &tmp1);
2162 srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = tmp1;
2163 srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 1;
2164 skmem_region_params_config(&srp[SKMEM_REGION_FLOWADV]);
2165 } else {
2166 srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = 0;
2167 srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 0;
2168 srp[SKMEM_REGION_FLOWADV].srp_c_obj_size = 0;
2169 srp[SKMEM_REGION_FLOWADV].srp_c_obj_cnt = 0;
2170 }
2171
2172 /* nexus advisory region size */
2173 if (nexusadv_size != 0) {
2174 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = nexusadv_size +
2175 sizeof(struct __kern_nexus_adv_metadata);
2176 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 1;
2177 skmem_region_params_config(&srp[SKMEM_REGION_NEXUSADV]);
2178 } else {
2179 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = 0;
2180 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 0;
2181 srp[SKMEM_REGION_NEXUSADV].srp_c_obj_size = 0;
2182 srp[SKMEM_REGION_NEXUSADV].srp_c_obj_cnt = 0;
2183 }
2184
2185 /* sysctls region is not applicable to nexus */
2186 srp[SKMEM_REGION_SYSCTLS].srp_r_obj_size = 0;
2187 srp[SKMEM_REGION_SYSCTLS].srp_r_obj_cnt = 0;
2188 srp[SKMEM_REGION_SYSCTLS].srp_c_obj_size = 0;
2189 srp[SKMEM_REGION_SYSCTLS].srp_c_obj_cnt = 0;
2190
2191 /*
2192 * Since the tx/alloc/event slots share the same region and cache,
2193 * we will use the same object size for both types of slots.
2194 */
2195 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size =
2196 (MAX(MAX(tx_slots, alloc_slots), ev_slots)) * SLOT_DESC_SZ;
2197 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt = tx_rings + alloc_rings +
2198 ev_rings;
2199 skmem_region_params_config(&srp[SKMEM_REGION_TXAKSD]);
2200
2201 /* USD and KSD objects share the same size and count */
2202 srp[SKMEM_REGION_TXAUSD].srp_r_obj_size =
2203 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size;
2204 srp[SKMEM_REGION_TXAUSD].srp_r_obj_cnt =
2205 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt;
2206 skmem_region_params_config(&srp[SKMEM_REGION_TXAUSD]);
2207
2208 /*
2209 * Since the rx/free slots share the same region and cache,
2210 * we will use the same object size for both types of slots.
2211 */
2212 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size =
2213 MAX(rx_slots, free_slots) * SLOT_DESC_SZ;
2214 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt = rx_rings + free_rings;
2215 skmem_region_params_config(&srp[SKMEM_REGION_RXFKSD]);
2216
2217 /* USD and KSD objects share the same size and count */
2218 srp[SKMEM_REGION_RXFUSD].srp_r_obj_size =
2219 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size;
2220 srp[SKMEM_REGION_RXFUSD].srp_r_obj_cnt =
2221 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt;
2222 skmem_region_params_config(&srp[SKMEM_REGION_RXFUSD]);
2223
2224 /* update these based on the adjusted/configured values */
2225 nxp->nxp_meta_size = srp[SKMEM_REGION_KMD].srp_c_obj_size;
2226 nxp->nxp_stats_size = stats_size;
2227 nxp->nxp_flowadv_max = flowadv_max;
2228 nxp->nxp_nexusadv_size = nexusadv_size;
2229 nxp->nxp_capabilities = capabs;
2230
2231 error:
2232 if (overflowline) {
2233 err = EOVERFLOW;
2234 SK_ERR("math overflow in %s on line %d",
2235 __func__, overflowline);
2236 }
2237 return err;
2238 }
2239
2240 #undef ADD
2241 #undef MUL
2242 #undef NXPROV_PARAMS_ADJUST
2243
2244 static void
nxprov_detaching_enqueue(struct kern_nexus_domain_provider * nxdom_prov)2245 nxprov_detaching_enqueue(struct kern_nexus_domain_provider *nxdom_prov)
2246 {
2247 SK_LOCK_ASSERT_HELD();
2248
2249 ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
2250 NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
2251
2252 ++nxprov_detaching_cnt;
2253 ASSERT(nxprov_detaching_cnt != 0);
2254 /*
2255 * Insert this to the detaching list; caller is expected to
2256 * have held a reference, most likely the same one that was
2257 * used for the per-domain provider list.
2258 */
2259 STAILQ_INSERT_TAIL(&nxprov_detaching_head, nxdom_prov,
2260 nxdom_prov_detaching_link);
2261 wakeup((caddr_t)&nxprov_detach_wchan);
2262 }
2263
2264 static struct kern_nexus_domain_provider *
nxprov_detaching_dequeue(void)2265 nxprov_detaching_dequeue(void)
2266 {
2267 struct kern_nexus_domain_provider *nxdom_prov;
2268
2269 SK_LOCK_ASSERT_HELD();
2270
2271 nxdom_prov = STAILQ_FIRST(&nxprov_detaching_head);
2272 ASSERT(nxprov_detaching_cnt != 0 || nxdom_prov == NULL);
2273 if (nxdom_prov != NULL) {
2274 ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
2275 NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
2276 ASSERT(nxprov_detaching_cnt != 0);
2277 --nxprov_detaching_cnt;
2278 STAILQ_REMOVE(&nxprov_detaching_head, nxdom_prov,
2279 kern_nexus_domain_provider, nxdom_prov_detaching_link);
2280 }
2281 return nxdom_prov;
2282 }
2283
/*
 * Detacher thread entry point.  Parks in msleep0() with
 * nxprov_detacher_cont as the continuation; all real work happens
 * in the continuation, so control is never expected to come back
 * to the lines following the msleep0() call.
 */
__attribute__((noreturn))
static void
nxprov_detacher(void *v, wait_result_t w)
{
#pragma unused(v, w)
	SK_LOCK();
	/*
	 * Block on the detach wait channel; on wakeup, execution
	 * resumes in nxprov_detacher_cont() (continuation style),
	 * not on the line below.
	 */
	(void) msleep0(&nxprov_detach_wchan, &sk_lock, (PZERO - 1),
	    __func__, 0, nxprov_detacher_cont);
	/*
	 * msleep0() shouldn't have returned as PCATCH was not set;
	 * therefore assert in this case.
	 */
	SK_UNLOCK();
	VERIFY(0);
	/* NOTREACHED */
	__builtin_unreachable();
}
2301
/*
 * Continuation for the detacher thread: drains the detaching list,
 * finalizing each dequeued provider via nxdom_del_provider_final(),
 * then goes back to sleep.  Runs with sk_lock held (asserted each
 * iteration) and never returns normally.
 */
static int
nxprov_detacher_cont(int err)
{
#pragma unused(err)
	struct kern_nexus_domain_provider *nxdom_prov;

	for (;;) {
		SK_LOCK_ASSERT_HELD();
		while (nxprov_detaching_cnt == 0) {
			/*
			 * Sleep until nxprov_detaching_enqueue() issues
			 * a wakeup; msleep0() then restarts this function
			 * from the top via the continuation argument
			 * rather than returning here.
			 */
			(void) msleep0(&nxprov_detach_wchan, &sk_lock,
			    (PZERO - 1), __func__, 0, nxprov_detacher_cont);
			/* NOTREACHED */
		}

		/* count is non-zero, so the list must be non-empty */
		ASSERT(STAILQ_FIRST(&nxprov_detaching_head) != NULL);

		nxdom_prov = nxprov_detaching_dequeue();
		if (nxdom_prov != NULL) {
			nxdom_del_provider_final(nxdom_prov);
		}
	}
}
2324