1 /*
2 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #include <skywalk/nexus/upipe/nx_user_pipe.h>
31 #include <skywalk/nexus/kpipe/nx_kernel_pipe.h>
32 #include <skywalk/nexus/flowswitch/nx_flowswitch.h>
33 #include <skywalk/nexus/netif/nx_netif.h>
34 #include <skywalk/nexus/monitor/nx_monitor.h>
35
/* Global list of attached nexus domains; protected by the SK lock. */
static STAILQ_HEAD(, nxdom) nexus_domains =
    STAILQ_HEAD_INITIALIZER(nexus_domains);

/* Domain lifecycle helpers (attach/detach list membership, init/fini). */
static void nxdom_attach(struct nxdom *);
static void nxdom_detach(struct nxdom *);
static void nxdom_init(struct nxdom *);
static void nxdom_terminate(struct nxdom *);
static void nxdom_fini(struct nxdom *);
static void nxdom_del_provider_final(struct kern_nexus_domain_provider *);

/* External (non-builtin) domain provider init/fini trampolines. */
static int nxdom_prov_ext_init(struct kern_nexus_domain_provider *);
static void nxdom_prov_ext_fini(struct kern_nexus_domain_provider *);
static struct kern_nexus_domain_provider *nxdom_prov_alloc(zalloc_flags_t);
static void nxdom_prov_free(struct kern_nexus_domain_provider *);

/* Deferred domain-provider teardown (detacher thread) machinery. */
static uint32_t nxprov_bound_var(uint32_t *, uint32_t, uint32_t, uint32_t,
    const char *);
static void nxprov_detaching_enqueue(struct kern_nexus_domain_provider *);
static struct kern_nexus_domain_provider *nxprov_detaching_dequeue(void);
static void nxprov_detacher(void *, wait_result_t);
static int nxprov_detacher_cont(int);

static struct nexus_controller *ncd_alloc(zalloc_flags_t);
static void ncd_free(struct nexus_controller *);

static struct nexus_attr *nxa_alloc(zalloc_flags_t);
static void nxa_free(struct nexus_attr *);

static int _kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid,
    struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
    uuid_t *nx_if_uuid);

/* Zones for controller descriptors, domain providers and nexus attrs. */
static ZONE_DECLARE(ncd_zone, SKMEM_ZONE_PREFIX ".nx.kern.ctl.desc",
    sizeof(struct nexus_controller), ZC_ZFREE_CLEARMEM);

static ZONE_DECLARE(nxdom_prov_zone, SKMEM_ZONE_PREFIX ".nx.kern.dom.prov",
    sizeof(struct kern_nexus_domain_provider), ZC_ZFREE_CLEARMEM);

static ZONE_DECLARE(nxa_zone, SKMEM_ZONE_PREFIX ".nx.kern.attr",
    sizeof(struct nexus_attr), ZC_ZFREE_CLEARMEM);

/* Nonzero once nxdom_attach_all() has completed; cleared on detach-all. */
static int __nxdom_inited = 0;
/* Providers awaiting final teardown by the detacher thread. */
static STAILQ_HEAD(, kern_nexus_domain_provider) nxprov_detaching_head =
    STAILQ_HEAD_INITIALIZER(nxprov_detaching_head);
static uint32_t nxprov_detaching_cnt;
static void *nxprov_detach_wchan;       /* wait channel for detacher */

/*
 * Array of default nexus domain providers.  Initialized once during
 * domain attach time; no lock is needed to read as they can be treated
 * as immutables, since default providers imply built-in ones and they
 * never detach in practice.
 */
struct kern_nexus_domain_provider *nxdom_prov_default[NEXUS_TYPE_MAX];
90
/*
 * Attach and initialize all built-in nexus domains, then start the
 * domain-provider detacher thread.  Called once at Skywalk startup
 * with the SK lock held; must not have been called before.
 */
void
nxdom_attach_all(void)
{
	struct nxdom *nxdom;
	thread_t tp = THREAD_NULL;

	SK_LOCK_ASSERT_HELD();
	ASSERT(!__nxdom_inited);
	ASSERT(STAILQ_EMPTY(&nexus_domains));

#if CONFIG_NEXUS_FLOWSWITCH
	nxdom_attach(&nx_flowswitch_dom_s);
#endif /* CONFIG_NEXUS_FLOWSWITCH */
#if CONFIG_NEXUS_USER_PIPE
	nxdom_attach(&nx_upipe_dom_s);
#endif /* CONFIG_NEXUS_USER_PIPE */
#if CONFIG_NEXUS_KERNEL_PIPE
	nxdom_attach(&nx_kpipe_dom_s);
#endif /* CONFIG_NEXUS_KERNEL_PIPE */
#if CONFIG_NEXUS_NETIF
	nxdom_attach(&nx_netif_dom_s);
#endif /* CONFIG_NEXUS_NETIF */
#if CONFIG_NEXUS_MONITOR
	nxdom_attach(&nx_monitor_dom_s);
#endif /* CONFIG_NEXUS_MONITOR */

	/* ask domains to initialize */
	STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link)
	nxdom_init(nxdom);

	if (kernel_thread_start(nxprov_detacher, NULL, &tp) != KERN_SUCCESS) {
		panic_plain("%s: couldn't create detacher thread", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}
	/* drop the extra thread reference from kernel_thread_start() */
	thread_deallocate(tp);

	__nxdom_inited = 1;
}
130
131 void
nxdom_detach_all(void)132 nxdom_detach_all(void)
133 {
134 struct nxdom *nxdom, *tnxdom;
135
136 SK_LOCK_ASSERT_HELD();
137
138 if (__nxdom_inited) {
139 STAILQ_FOREACH_SAFE(nxdom, &nexus_domains, nxdom_link, tnxdom) {
140 nxdom_terminate(nxdom);
141 nxdom_fini(nxdom);
142 nxdom_detach(nxdom);
143 }
144
145 /*
146 * TODO: [email protected] -- terminate detacher thread.
147 */
148
149 __nxdom_inited = 0;
150 }
151 ASSERT(STAILQ_EMPTY(&nexus_domains));
152 }
153
/*
 * Sanity-check one nexus domain parameter triple: the configured
 * default for _var must lie within its [min, max] range.
 */
#define ASSERT_NXDOM_PARAMS(_dom, _var) do {                            \
	ASSERT(NXDOM_MIN(_dom, _var) <= NXDOM_MAX(_dom, _var));         \
	ASSERT(NXDOM_DEF(_dom, _var) >= NXDOM_MIN(_dom, _var));         \
	ASSERT(NXDOM_DEF(_dom, _var) <= NXDOM_MAX(_dom, _var));         \
} while (0)
159
/*
 * Attach a nexus domain to the global nexus_domains list.  Verifies
 * (via VERIFY/ASSERT) that the domain type is unique and valid, that
 * the metadata type/subtype are valid, and that all parameter triples
 * are sane; on DEVELOPMENT/DEBUG builds, boot-args may override the
 * default TX/RX ring sizes for the flowswitch and netif domains.
 * Called with the SK lock held.
 */
static void
nxdom_attach(struct nxdom *nxdom)
{
	struct nxdom *nxdom1;

	SK_LOCK_ASSERT_HELD();
	ASSERT(!(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED));

	STAILQ_FOREACH(nxdom1, &nexus_domains, nxdom_link) {
		if (nxdom1->nxdom_type == nxdom->nxdom_type) {
			/* type must be unique; this is a programming error */
			VERIFY(0);
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}

	/* verify this is a valid type */
	switch (nxdom->nxdom_type) {
	case NEXUS_TYPE_USER_PIPE:
	case NEXUS_TYPE_KERNEL_PIPE:
	case NEXUS_TYPE_NET_IF:
	case NEXUS_TYPE_FLOW_SWITCH:
	case NEXUS_TYPE_MONITOR:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/* verify this is a valid metadata type */
	switch (nxdom->nxdom_md_type) {
	case NEXUS_META_TYPE_QUANTUM:
	case NEXUS_META_TYPE_PACKET:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/* verify this is a valid metadata subtype */
	switch (nxdom->nxdom_md_subtype) {
	case NEXUS_META_SUBTYPE_PAYLOAD:
	case NEXUS_META_SUBTYPE_RAW:
		break;

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

#if (DEVELOPMENT || DEBUG)
	/*
	 * Override the default ring sizes for flowswitch if configured
	 * via boot-args.  Each nexus provider instance can still change
	 * the values if so desired.  The boot-arg value itself is
	 * clamped to the domain's [min, max] range before use.
	 */
	if (nxdom->nxdom_type == NEXUS_TYPE_FLOW_SWITCH) {
		if (sk_txring_sz != 0) {
			if (sk_txring_sz < NXDOM_MIN(nxdom, tx_slots)) {
				sk_txring_sz = NXDOM_MIN(nxdom, tx_slots);
			} else if (sk_txring_sz > NXDOM_MAX(nxdom, tx_slots)) {
				sk_txring_sz = NXDOM_MAX(nxdom, tx_slots);
			}
			NXDOM_DEF(nxdom, tx_slots) = sk_txring_sz;
		}
		if (sk_rxring_sz != 0) {
			if (sk_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) {
				sk_rxring_sz = NXDOM_MIN(nxdom, rx_slots);
			} else if (sk_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) {
				sk_rxring_sz = NXDOM_MAX(nxdom, rx_slots);
			}
			NXDOM_DEF(nxdom, rx_slots) = sk_rxring_sz;
		}
	}
	/*
	 * Override the default ring sizes for netif if configured
	 * via boot-args.  Each nexus provider instance can still change
	 * the values if so desired.
	 */
	if (nxdom->nxdom_type == NEXUS_TYPE_NET_IF) {
		if (sk_net_txring_sz != 0) {
			if (sk_net_txring_sz < NXDOM_MIN(nxdom, tx_slots)) {
				sk_net_txring_sz = NXDOM_MIN(nxdom, tx_slots);
			} else if (sk_net_txring_sz > NXDOM_MAX(nxdom, tx_slots)) {
				sk_net_txring_sz = NXDOM_MAX(nxdom, tx_slots);
			}
			NXDOM_DEF(nxdom, tx_slots) = sk_net_txring_sz;
		}
		if (sk_net_rxring_sz != 0) {
			if (sk_net_rxring_sz < NXDOM_MIN(nxdom, rx_slots)) {
				sk_net_rxring_sz = NXDOM_MIN(nxdom, rx_slots);
			} else if (sk_net_rxring_sz > NXDOM_MAX(nxdom, rx_slots)) {
				sk_net_rxring_sz = NXDOM_MAX(nxdom, rx_slots);
			}
			NXDOM_DEF(nxdom, rx_slots) = sk_net_rxring_sz;
		}
	}

#endif /* DEVELOPMENT || DEBUG */

	/* verify that parameters are sane */
	ASSERT(NXDOM_MAX(nxdom, ports) > 0);
	ASSERT_NXDOM_PARAMS(nxdom, ports);
	ASSERT_NXDOM_PARAMS(nxdom, tx_rings);
	ASSERT_NXDOM_PARAMS(nxdom, rx_rings);
	ASSERT(NXDOM_MAX(nxdom, tx_slots) > 0);
	ASSERT_NXDOM_PARAMS(nxdom, tx_slots);
	ASSERT(NXDOM_MAX(nxdom, rx_slots) > 0);
	ASSERT_NXDOM_PARAMS(nxdom, rx_slots);
	ASSERT_NXDOM_PARAMS(nxdom, buf_size);
	ASSERT_NXDOM_PARAMS(nxdom, meta_size);
	ASSERT_NXDOM_PARAMS(nxdom, pipes);
	ASSERT_NXDOM_PARAMS(nxdom, extensions);

	/* these must exist */
	ASSERT(nxdom->nxdom_bind_port != NULL);
	ASSERT(nxdom->nxdom_unbind_port != NULL);
	ASSERT(nxdom->nxdom_connect != NULL);
	ASSERT(nxdom->nxdom_disconnect != NULL);
	ASSERT(nxdom->nxdom_defunct != NULL);
	ASSERT(nxdom->nxdom_defunct_finalize != NULL);

	STAILQ_INSERT_TAIL(&nexus_domains, nxdom, nxdom_link);
	nxdom->nxdom_flags |= NEXUSDOMF_ATTACHED;
}
291
292 #undef VERIFY_NXDOM_PARAMS
293
294 static void
nxdom_detach(struct nxdom * nxdom)295 nxdom_detach(struct nxdom *nxdom)
296 {
297 SK_LOCK_ASSERT_HELD();
298 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
299
300 STAILQ_REMOVE(&nexus_domains, nxdom, nxdom, nxdom_link);
301 nxdom->nxdom_flags &= ~NEXUSDOMF_ATTACHED;
302 }
303
304 static void
nxdom_init(struct nxdom * nxdom)305 nxdom_init(struct nxdom *nxdom)
306 {
307 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
308
309 SK_LOCK_ASSERT_HELD();
310
311 if (!(nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED)) {
312 if (nxdom->nxdom_init != NULL) {
313 nxdom->nxdom_init(nxdom);
314 }
315 nxdom->nxdom_flags |= NEXUSDOMF_INITIALIZED;
316 }
317 }
318
319 static void
nxdom_terminate(struct nxdom * nxdom)320 nxdom_terminate(struct nxdom *nxdom)
321 {
322 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
323
324 SK_LOCK_ASSERT_HELD();
325
326 if ((nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) &&
327 !(nxdom->nxdom_flags & NEXUSDOMF_TERMINATED)) {
328 if (nxdom->nxdom_terminate != NULL) {
329 nxdom->nxdom_terminate(nxdom);
330 }
331 nxdom->nxdom_flags |= NEXUSDOMF_TERMINATED;
332 }
333 }
334
335 static void
nxdom_fini(struct nxdom * nxdom)336 nxdom_fini(struct nxdom *nxdom)
337 {
338 ASSERT(nxdom->nxdom_flags & NEXUSDOMF_ATTACHED);
339
340 if (nxdom->nxdom_flags & NEXUSDOMF_INITIALIZED) {
341 if (nxdom->nxdom_fini != NULL) {
342 nxdom->nxdom_fini(nxdom);
343 }
344 nxdom->nxdom_flags &= ~NEXUSDOMF_INITIALIZED;
345 }
346 }
347
/*
 * Register a domain provider with its domain.  Assigns the provider a
 * fresh UUID, runs its optional init callback, links it onto the
 * domain's provider list (taking a reference for list membership),
 * and records it as the domain's default provider if so flagged
 * (taking another reference for the default array).  Returns 0 on
 * success, EEXIST on a name collision, or the init callback's error.
 * Called with the SK lock held.
 */
int
nxdom_prov_add(struct nxdom *nxdom,
    struct kern_nexus_domain_provider *nxdom_prov)
{
	struct kern_nexus_domain_provider *nxprov1;
	nexus_type_t type = nxdom->nxdom_type;
	boolean_t builtin;
	int err = 0;

	SK_LOCK_ASSERT_HELD();
	ASSERT(type < NEXUS_TYPE_MAX);

	builtin = !(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT);

	STAILQ_FOREACH(nxprov1, &nxdom->nxdom_prov_head, nxdom_prov_link) {
		/*
		 * We can be a little more strict in the kernel and
		 * avoid namespace collision (even though each domain
		 * provider has a UUID); this also guarantees that
		 * external providers won't conflict with the builtin
		 * ones.
		 */
		if (strcmp(nxprov1->nxdom_prov_name,
		    nxdom_prov->nxdom_prov_name) == 0) {
			return EEXIST;
		}
	}

	VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED));
	VERIFY(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED));

	uuid_generate_random(nxdom_prov->nxdom_prov_uuid);
	nxdom_prov->nxdom_prov_dom = nxdom;
	if (nxdom_prov->nxdom_prov_init != NULL) {
		err = nxdom_prov->nxdom_prov_init(nxdom_prov);
	}

	if (err == 0) {
		nxdom_prov->nxdom_prov_flags |=
		    (NXDOMPROVF_ATTACHED | NXDOMPROVF_INITIALIZED);
		STAILQ_INSERT_TAIL(&nxdom->nxdom_prov_head, nxdom_prov,
		    nxdom_prov_link);
		/* for being in the list */
		nxdom_prov_retain_locked(nxdom_prov);

		if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) {
			/* only builtins may be the default, and only once */
			VERIFY(builtin && nxdom_prov_default[type] == NULL);
			nxdom_prov_default[type] = nxdom_prov;
			/* for being in the array */
			nxdom_prov_retain_locked(nxdom_prov);
		}

		SK_D("nxdom_prov 0x%llx (%s) dom %s",
		    SK_KVA(nxdom_prov), nxdom_prov->nxdom_prov_name,
		    nxdom->nxdom_name);
	} else {
		/* init failed; undo UUID/domain assignment */
		uuid_clear(nxdom_prov->nxdom_prov_uuid);
		nxdom_prov->nxdom_prov_dom = NULL;
	}

	return err;
}
409
/*
 * Begin removal of a domain provider: unlink it from its domain's
 * provider list, clear it from the default array if applicable, and
 * hand it to the detacher thread for final teardown.  Safe to call
 * more than once; subsequent calls while detaching are no-ops.
 * Called with the SK lock held.
 */
void
nxdom_prov_del(struct kern_nexus_domain_provider *nxdom_prov)
{
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;
	nexus_type_t type = nxdom->nxdom_type;

	SK_LOCK_ASSERT_HELD();
	ASSERT(type < NEXUS_TYPE_MAX);
	ASSERT(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_ATTACHED);

	/* already queued for teardown; nothing more to do */
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DETACHING) {
		return;
	}

	SK_D("nxdom_prov 0x%llx (%s:%s)", SK_KVA(nxdom_prov), nxdom->nxdom_name,
	    nxdom_prov->nxdom_prov_name);

	/* keep the reference around for the detaching list (see below) */
	STAILQ_REMOVE(&nxdom->nxdom_prov_head, nxdom_prov,
	    kern_nexus_domain_provider, nxdom_prov_link);
	nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_ATTACHED;
	nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_DETACHING;

	/* there can only be one default and it must match this one */
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT) {
		ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT));
		VERIFY(nxdom_prov_default[type] == nxdom_prov);
		nxdom_prov_default[type] = NULL;
		/*
		 * Release reference held for the array; this must
		 * not be the last reference, as there is still at
		 * least one which we kept for the detaching list.
		 */
		VERIFY(!nxdom_prov_release_locked(nxdom_prov));
	}

	/* add to detaching list and wake up detacher */
	nxprov_detaching_enqueue(nxdom_prov);
}
449
/*
 * Final stage of domain provider removal, run from the detacher
 * thread: clear the detaching state and drop the detaching-list
 * reference.  Called with the SK lock held.
 *
 * NOTE(review): nxdom is declared only under DEBUG/DEVELOPMENT, and
 * referenced only by ASSERT()/SK_D() below — presumably those compile
 * away on RELEASE builds, otherwise this would not build; confirm.
 */
static void
nxdom_del_provider_final(struct kern_nexus_domain_provider *nxdom_prov)
{
#if (DEBUG || DEVELOPMENT)
	struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;
#endif /* DEBUG || DEVELOPMENT */

	SK_LOCK_ASSERT_HELD();

	/* must be detaching and no longer attached */
	ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
	    NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
	ASSERT(nxdom != NULL);

	SK_D("nxdom_prov 0x%llx (%s:%s)", SK_KVA(nxdom_prov), nxdom->nxdom_name,
	    nxdom_prov->nxdom_prov_name);

	nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_DETACHING;

	/*
	 * Release reference held for detaching list; if this is the last
	 * reference, the domain provider's nxdom_prov_fini() callback will
	 * be called (if applicable) within the detacher thread's context.
	 * Otherwise, this will occur when the last nexus provider for that
	 * domain provider has been released.
	 */
	(void) nxdom_prov_release_locked(nxdom_prov);
}
477
478 struct nxdom *
nxdom_find(nexus_type_t type)479 nxdom_find(nexus_type_t type)
480 {
481 struct nxdom *nxdom;
482
483 SK_LOCK_ASSERT_HELD();
484 ASSERT(type < NEXUS_TYPE_MAX);
485
486 STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) {
487 if (nxdom->nxdom_type == type) {
488 break;
489 }
490 }
491
492 return nxdom;
493 }
494
495 struct kern_nexus_domain_provider *
nxdom_prov_find(const struct nxdom * nxdom,const char * name)496 nxdom_prov_find(const struct nxdom *nxdom, const char *name)
497 {
498 struct kern_nexus_domain_provider *nxdom_prov = NULL;
499
500 SK_LOCK_ASSERT_HELD();
501
502 if (name != NULL) {
503 STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head,
504 nxdom_prov_link) {
505 if (strcmp(nxdom_prov->nxdom_prov_name, name) == 0) {
506 break;
507 }
508 }
509 }
510
511 if (nxdom_prov != NULL) {
512 nxdom_prov_retain_locked(nxdom_prov); /* for caller */
513 }
514 return nxdom_prov;
515 }
516
517 struct kern_nexus_domain_provider *
nxdom_prov_find_uuid(const uuid_t dom_prov_uuid)518 nxdom_prov_find_uuid(const uuid_t dom_prov_uuid)
519 {
520 struct kern_nexus_domain_provider *nxdom_prov = NULL;
521 struct nxdom *nxdom;
522
523 SK_LOCK_ASSERT_HELD();
524 ASSERT(dom_prov_uuid != NULL && !uuid_is_null(dom_prov_uuid));
525
526 STAILQ_FOREACH(nxdom, &nexus_domains, nxdom_link) {
527 STAILQ_FOREACH(nxdom_prov, &nxdom->nxdom_prov_head,
528 nxdom_prov_link) {
529 ASSERT(!uuid_is_null(nxdom_prov->nxdom_prov_uuid));
530 if (uuid_compare(nxdom_prov->nxdom_prov_uuid,
531 dom_prov_uuid) == 0) {
532 break;
533 }
534 }
535 if (nxdom_prov != NULL) {
536 nxdom_prov_retain_locked(nxdom_prov); /* for caller */
537 break;
538 }
539 }
540
541 return nxdom_prov;
542 }
543
/*
 * Register an external (kext-supplied) nexus domain provider of the
 * given type.  The new provider inherits the callback vector of the
 * type's default (builtin) provider, with init/fini routed through
 * the external trampolines.  Only kernel-pipe and netif domains may
 * be extended this way.  On success, the provider's UUID is returned
 * through dom_prov_uuid.
 */
errno_t
kern_nexus_register_domain_provider(const nexus_type_t type,
    const nexus_domain_provider_name_t name,
    const struct kern_nexus_domain_provider_init *init,
    const uint32_t init_len, uuid_t *dom_prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	struct nxdom *nxdom;
	errno_t err = 0;

	/* external init blob must overlay nxdom_prov_ext exactly */
	_CASSERT(sizeof(*init) == sizeof(nxdom_prov->nxdom_prov_ext));

	if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) {
		return EINVAL;
	}

	uuid_clear(*dom_prov_uuid);

	if (name == NULL || init == NULL || init_len < sizeof(*init) ||
	    init->nxdpi_version != KERN_NEXUS_DOMAIN_PROVIDER_CURRENT_VERSION) {
		return EINVAL;
	}

	/*
	 * init, fini are required.
	 */
	if (init->nxdpi_init == NULL || init->nxdpi_fini == NULL) {
		return EINVAL;
	}

	SK_LOCK();
	/* the builtin default for this type must already be registered */
	if (nxdom_prov_default[type] == NULL) {
		err = ENXIO;
		goto done;
	}

	nxdom = nxdom_find(type);
	if (nxdom == NULL) {
		err = ENXIO;
		goto done;
	}

	/*
	 * Allow only kernel pipe and netif external domain providers for
	 * now, until we understand the implications and requirements for
	 * supporting other domain types.  For all other types, using
	 * the built-in domain providers and registering nexus should
	 * suffice.
	 */
	if (nxdom->nxdom_type != NEXUS_TYPE_KERNEL_PIPE &&
	    nxdom->nxdom_type != NEXUS_TYPE_NET_IF) {
		err = EINVAL;
		goto done;
	}

	nxdom_prov = nxdom_prov_alloc(Z_WAITOK);

	/*
	 * Point all callback routines to the default provider for this
	 * domain; for nxdom_prov{init,fini}, refer to externally-provided
	 * callback routines, if applicable.
	 */
	bcopy(init, &nxdom_prov->nxdom_prov_ext, sizeof(*init));
	bcopy(&nxdom_prov_default[type]->nxdom_prov_cb,
	    &nxdom_prov->nxdom_prov_cb, sizeof(struct nxdom_prov_cb));
	nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT;
	nxdom_prov->nxdom_prov_init = nxdom_prov_ext_init;
	nxdom_prov->nxdom_prov_fini = nxdom_prov_ext_fini;
	(void) snprintf(nxdom_prov->nxdom_prov_name,
	    sizeof(nxdom_prov->nxdom_prov_name), "%s", name);

	ASSERT(!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_DEFAULT));
	err = nxdom_prov_add(nxdom, nxdom_prov);
	if (err != 0) {
		nxdom_prov_free(nxdom_prov);
		nxdom_prov = NULL;
	}

done:
	if (nxdom_prov != NULL) {
		ASSERT(err == 0 && !uuid_is_null(nxdom_prov->nxdom_prov_uuid));
		uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
	}
	SK_UNLOCK();

	return err;
}
631
632 errno_t
kern_nexus_deregister_domain_provider(const uuid_t dom_prov_uuid)633 kern_nexus_deregister_domain_provider(const uuid_t dom_prov_uuid)
634 {
635 struct kern_nexus_domain_provider *nxdom_prov = NULL;
636 errno_t err = 0;
637
638 if (dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
639 return EINVAL;
640 }
641
642 SK_LOCK();
643 nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid);
644 if (nxdom_prov == NULL) {
645 err = ENXIO;
646 goto done;
647 }
648
649 /* don't allow external request for built-in domain providers */
650 if (!(nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT)) {
651 err = EINVAL;
652 goto done;
653 }
654
655 /* schedule this to be deleted */
656 nxdom_prov_del(nxdom_prov);
657 done:
658 /* release reference from nxdom_prov_find_uuid */
659 if (nxdom_prov != NULL) {
660 (void) nxdom_prov_release_locked(nxdom_prov);
661 }
662 SK_UNLOCK();
663
664 return err;
665 }
666
667 errno_t
kern_nexus_get_default_domain_provider(const nexus_type_t type,uuid_t * dom_prov_uuid)668 kern_nexus_get_default_domain_provider(const nexus_type_t type,
669 uuid_t *dom_prov_uuid)
670 {
671 struct kern_nexus_domain_provider *nxdom_prov;
672
673 if (type >= NEXUS_TYPE_MAX || dom_prov_uuid == NULL) {
674 return EINVAL;
675 }
676
677 uuid_clear(*dom_prov_uuid);
678
679 /* no lock is needed; array is immutable */
680 if ((nxdom_prov = nxdom_prov_default[type]) == NULL) {
681 return ENXIO;
682 }
683
684 uuid_copy(*dom_prov_uuid, nxdom_prov->nxdom_prov_uuid);
685
686 return 0;
687 }
688
689 static int
nxdom_prov_ext_init(struct kern_nexus_domain_provider * nxdom_prov)690 nxdom_prov_ext_init(struct kern_nexus_domain_provider *nxdom_prov)
691 {
692 int err = 0;
693
694 SK_D("initializing %s", nxdom_prov->nxdom_prov_name);
695
696 ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_init != NULL);
697 if ((err = nxdom_prov->nxdom_prov_ext.nxdpi_init(nxdom_prov)) == 0) {
698 nxdom_prov->nxdom_prov_flags |= NXDOMPROVF_EXT_INITED;
699 }
700
701 return err;
702 }
703
704 static void
nxdom_prov_ext_fini(struct kern_nexus_domain_provider * nxdom_prov)705 nxdom_prov_ext_fini(struct kern_nexus_domain_provider *nxdom_prov)
706 {
707 SK_D("destroying %s", nxdom_prov->nxdom_prov_name);
708
709 if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT_INITED) {
710 ASSERT(nxdom_prov->nxdom_prov_ext.nxdpi_fini != NULL);
711 nxdom_prov->nxdom_prov_ext.nxdpi_fini(nxdom_prov);
712 nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_EXT_INITED;
713 }
714 }
715
/*
 * Allocate a zeroed nexus_attr from its zone; `how' selects the
 * zalloc blocking behavior (e.g. Z_WAITOK).
 */
static struct nexus_attr *
nxa_alloc(zalloc_flags_t how)
{
	return zalloc_flags(nxa_zone, how | Z_ZERO);
}
721
/* Return a nexus_attr to its zone. */
static void
nxa_free(struct nexus_attr *nxa)
{
	SK_DF(SK_VERB_MEM, "nxa 0x%llx FREE", SK_KVA(nxa));
	zfree(nxa_zone, nxa);
}
728
729 errno_t
kern_nexus_attr_create(nexus_attr_t * nxa)730 kern_nexus_attr_create(nexus_attr_t *nxa)
731 {
732 errno_t err = 0;
733
734 if (nxa == NULL) {
735 err = EINVAL;
736 } else {
737 *nxa = nxa_alloc(Z_WAITOK);
738 }
739 return err;
740 }
741
742 errno_t
kern_nexus_attr_clone(const nexus_attr_t nxa,nexus_attr_t * nnxa)743 kern_nexus_attr_clone(const nexus_attr_t nxa, nexus_attr_t *nnxa)
744 {
745 errno_t err = 0;
746
747 if (nnxa == NULL) {
748 err = EINVAL;
749 } else {
750 err = kern_nexus_attr_create(nnxa);
751 if (err == 0 && nxa != NULL) {
752 ASSERT(*nnxa != NULL);
753 bcopy(nxa, *nnxa, sizeof(**nnxa));
754 }
755 }
756 return err;
757 }
758
/* Set attribute `type' on nxa to `value'; wraps __nexus_attr_set(). */
errno_t
kern_nexus_attr_set(const nexus_attr_t nxa,
    const nexus_attr_type_t type, const uint64_t value)
{
	return __nexus_attr_set(nxa, type, value);
}
765
/* Read attribute `type' from nxa into *value; wraps __nexus_attr_get(). */
errno_t
kern_nexus_attr_get(nexus_attr_t nxa, const nexus_attr_type_t type,
    uint64_t *value)
{
	return __nexus_attr_get(nxa, type, value);
}
772
/* Free a nexus attribute object created by kern_nexus_attr_create(). */
void
kern_nexus_attr_destroy(nexus_attr_t nxa)
{
	nxa_free(nxa);
}
778
/*
 * Allocate a zeroed nexus_controller descriptor from its zone; `how'
 * selects the zalloc blocking behavior (e.g. Z_WAITOK).
 */
static struct nexus_controller *
ncd_alloc(zalloc_flags_t how)
{
	return zalloc_flags(ncd_zone, how | Z_ZERO);
}
784
/* Return a nexus_controller descriptor to its zone. */
static void
ncd_free(struct nexus_controller *ncd)
{
	SK_DF(SK_VERB_MEM, "ncd 0x%llx FREE", SK_KVA(ncd));
	zfree(ncd_zone, ncd);
}
791
/* Return a pointer to the global shared kernel controller (kernnxctl). */
nexus_controller_t
kern_nexus_shared_controller(void)
{
	return &kernnxctl;
}
797
/*
 * Create a new kernel nexus controller backed by a freshly-created
 * nxctl.  On success, *ncd owns the nxctl reference taken by
 * nxctl_create(); on failure, any partially-created state is torn
 * down and *ncd is left NULL.
 */
errno_t
kern_nexus_controller_create(nexus_controller_t *ncd)
{
	struct nxctl *nxctl = NULL;
	uuid_t nxctl_uuid;
	errno_t err = 0;

	/* generated up front; only used if we proceed past validation */
	uuid_generate_random(nxctl_uuid);

	if (ncd == NULL) {
		err = EINVAL;
		goto done;
	} else {
		*ncd = NULL;
	}

	nxctl = nxctl_create(kernproc, NULL, nxctl_uuid, &err);
	if (nxctl == NULL) {
		ASSERT(err != 0);
		goto done;
	}

	/* NOTE(review): Z_WAITOK alloc assumed to never return NULL — confirm */
	*ncd = ncd_alloc(Z_WAITOK);
	(*ncd)->ncd_nxctl = nxctl;      /* ref from nxctl_create */

done:
	if (err != 0) {
		/* unwind whatever was created before the failure */
		if (nxctl != NULL) {
			nxctl_dtor(nxctl);
			nxctl = NULL;
		}
		if (ncd != NULL && *ncd != NULL) {
			ncd_free(*ncd);
			*ncd = NULL;
		}
	}

	return err;
}
837
/*
 * True when the nxpi_<cb1>/nxpi_<cb2> callback pair is asymmetrical,
 * i.e. exactly one of the two is NULL (both-NULL and both-set are OK).
 * NOTE(review): the first clause is redundant — if the XOR holds, the
 * two cannot both be NULL — but it is harmless.
 */
#define NXPI_INVALID_CB_PAIRS(cb1, cb2)	\
	(!(init->nxpi_##cb1 == NULL && init->nxpi_##cb2 == NULL) &&	\
	((init->nxpi_##cb1 == NULL) ^ (init->nxpi_##cb2 == NULL)))
841
/*
 * Validate the kern_nexus_provider_init blob passed to
 * kern_nexus_controller_register_provider().  A NULL init is allowed
 * (returns 0); otherwise the blob's leading version word selects the
 * expected layout and the required/optional callback rules.  Returns
 * EINVAL on any violation.
 */
static errno_t
nexus_controller_register_provider_validate_init_params(
	const struct kern_nexus_provider_init *init, uint32_t init_len,
	nexus_type_t nxdom_type)
{
	errno_t err = 0;
	struct kern_nexus_netif_provider_init *netif_init;

	/* the version word must lead both init layouts */
	_CASSERT(__builtin_offsetof(struct kern_nexus_provider_init,
	    nxpi_version) == 0);
	_CASSERT(sizeof(init->nxpi_version) == sizeof(uint32_t));

	if (init == NULL) {
		return 0;
	}

	/* need at least the version word to dispatch on */
	if (init_len < sizeof(uint32_t)) {
		return EINVAL;
	}

	switch (init->nxpi_version) {
	case KERN_NEXUS_PROVIDER_VERSION_1:
		if (init_len != sizeof(struct kern_nexus_provider_init)) {
			err = EINVAL;
			break;
		}
		/*
		 * sync_{tx,rx} callbacks are required; the rest of the
		 * callback pairs are optional, but must be symmetrical.
		 */
		if (init->nxpi_sync_tx == NULL || init->nxpi_sync_rx == NULL ||
		    init->nxpi_pre_connect == NULL ||
		    init->nxpi_connected == NULL ||
		    init->nxpi_pre_disconnect == NULL ||
		    init->nxpi_disconnected == NULL ||
		    NXPI_INVALID_CB_PAIRS(ring_init, ring_fini) ||
		    NXPI_INVALID_CB_PAIRS(slot_init, slot_fini)) {
			err = EINVAL;
			break;
		}
		/*
		 * Tx doorbell interface is only supported for netif and
		 * Tx doorbell is mandatory for netif
		 */
		if (((init->nxpi_tx_doorbell != NULL) &&
		    (nxdom_type != NEXUS_TYPE_NET_IF)) ||
		    ((nxdom_type == NEXUS_TYPE_NET_IF) &&
		    (init->nxpi_tx_doorbell == NULL))) {
			err = EINVAL;
			break;
		}
		/*
		 * Capabilities configuration interface is only supported for
		 * netif.
		 */
		if ((init->nxpi_config_capab != NULL) &&
		    (nxdom_type != NEXUS_TYPE_NET_IF)) {
			err = EINVAL;
			break;
		}
		break;

	case KERN_NEXUS_PROVIDER_VERSION_NETIF:
		/* netif variant: different struct layout, netif-only */
		if (init_len != sizeof(struct kern_nexus_netif_provider_init)) {
			err = EINVAL;
			break;
		}
		if (nxdom_type != NEXUS_TYPE_NET_IF) {
			err = EINVAL;
			break;
		}
		/* reinterpret the blob as the netif init layout */
		netif_init =
		    __DECONST(struct kern_nexus_netif_provider_init *, init);
		/* all netif callbacks are mandatory */
		if (netif_init->nxnpi_pre_connect == NULL ||
		    netif_init->nxnpi_connected == NULL ||
		    netif_init->nxnpi_pre_disconnect == NULL ||
		    netif_init->nxnpi_disconnected == NULL ||
		    netif_init->nxnpi_qset_init == NULL ||
		    netif_init->nxnpi_qset_fini == NULL ||
		    netif_init->nxnpi_queue_init == NULL ||
		    netif_init->nxnpi_queue_fini == NULL ||
		    netif_init->nxnpi_tx_qset_notify == NULL ||
		    netif_init->nxnpi_config_capab == NULL) {
			err = EINVAL;
			break;
		}
		break;

	default:
		err = EINVAL;
		break;
	}
	return err;
}
936
/*
 * Register a nexus provider under the given domain provider UUID,
 * using the supplied init callbacks and optional attributes.  On
 * success the new provider's UUID is returned through prov_uuid.
 * Lock order: NXCTL lock is taken before the SK lock; the SK lock is
 * dropped before this function returns.
 */
errno_t
kern_nexus_controller_register_provider(const nexus_controller_t ncd,
    const uuid_t dom_prov_uuid, const nexus_name_t name,
    const struct kern_nexus_provider_init *init, uint32_t init_len,
    const nexus_attr_t nxa, uuid_t *prov_uuid)
{
	struct kern_nexus_domain_provider *nxdom_prov = NULL;
	struct kern_nexus_provider *nxprov = NULL;
	nexus_type_t nxdom_type;
	struct nxprov_reg reg;
	struct nxctl *nxctl;
	errno_t err = 0;

	if (prov_uuid == NULL) {
		return EINVAL;
	}

	uuid_clear(*prov_uuid);

	if (ncd == NULL ||
	    dom_prov_uuid == NULL || uuid_is_null(dom_prov_uuid)) {
		return EINVAL;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	SK_LOCK();
	nxdom_prov = nxdom_prov_find_uuid(dom_prov_uuid);
	if (nxdom_prov == NULL) {
		SK_UNLOCK();
		err = ENXIO;
		goto done;
	}

	nxdom_type = nxdom_prov->nxdom_prov_dom->nxdom_type;
	ASSERT(nxdom_type < NEXUS_TYPE_MAX);

	err = nexus_controller_register_provider_validate_init_params(init,
	    init_len, nxdom_type);
	if (err != 0) {
		SK_UNLOCK();
		/* validate only ever returns EINVAL, so this is a no-op */
		err = EINVAL;
		goto done;
	}

	if ((err = __nexus_provider_reg_prepare(&reg, name,
	    nxdom_type, nxa)) != 0) {
		SK_UNLOCK();
		goto done;
	}

	/* netif-variant init implies logical-link mode for the provider */
	if (init && init->nxpi_version == KERN_NEXUS_PROVIDER_VERSION_NETIF) {
		reg.nxpreg_params.nxp_flags |= NXPF_NETIF_LLINK;
	}

	/* callee will hold reference on nxdom_prov upon success */
	if ((nxprov = nxprov_create_kern(nxctl, nxdom_prov, &reg,
	    init, &err)) == NULL) {
		SK_UNLOCK();
		ASSERT(err != 0);
		goto done;
	}
	SK_UNLOCK();

	uuid_copy(*prov_uuid, nxprov->nxprov_uuid);

done:
	SK_LOCK_ASSERT_NOTHELD();
	NXCTL_UNLOCK(nxctl);

	/* NOTE(review): err != 0 with nxprov != NULL looks unreachable here */
	if (err != 0 && nxprov != NULL) {
		err = nxprov_close(nxprov, FALSE);
	}

	/* release extra ref from nxprov_create_kern */
	if (nxprov != NULL) {
		nxprov_release(nxprov);
	}
	/* release extra ref from nxdom_prov_find_uuid */
	if (nxdom_prov != NULL) {
		(void) nxdom_prov_release(nxdom_prov);
	}

	return err;
}
1022
1023 #undef NXPI_INVALID_CB_PAIRS
1024
1025 errno_t
kern_nexus_controller_deregister_provider(const nexus_controller_t ncd,const uuid_t prov_uuid)1026 kern_nexus_controller_deregister_provider(const nexus_controller_t ncd,
1027 const uuid_t prov_uuid)
1028 {
1029 errno_t err;
1030
1031 if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid)) {
1032 err = EINVAL;
1033 } else {
1034 struct nxctl *nxctl = ncd->ncd_nxctl;
1035 NXCTL_LOCK(nxctl);
1036 err = nxprov_destroy(nxctl, prov_uuid);
1037 NXCTL_UNLOCK(nxctl);
1038 }
1039 return err;
1040 }
1041
/*
 * Instantiate a nexus from a registered provider.  The optional
 * nx_ctx/nx_ctx_release pair travels with the nexus; the init blob
 * supplies packet buffer pools (if nxi_rx_pbufpool is set it must be
 * the same pool as nxi_tx_pbufpool).  On success the instance UUID
 * is returned through nx_uuid.
 */
errno_t
kern_nexus_controller_alloc_provider_instance(const nexus_controller_t ncd,
    const uuid_t prov_uuid, const void *nx_ctx,
    nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid,
    const struct kern_nexus_init *init)
{
	struct kern_nexus *nx = NULL;
	struct nxctl *nxctl;
	errno_t err = 0;

	/* split RX/TX pools are not supported: RX must match TX if set */
	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
	    nx_uuid == NULL || init == NULL ||
	    init->nxi_version != KERN_NEXUS_CURRENT_VERSION ||
	    (init->nxi_rx_pbufpool != NULL &&
	    init->nxi_rx_pbufpool != init->nxi_tx_pbufpool)) {
		err = EINVAL;
		goto done;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_UNDEFINED, nx_ctx,
	    nx_ctx_release, init->nxi_tx_pbufpool, init->nxi_rx_pbufpool, &err);
	NXCTL_UNLOCK(nxctl);
	if (nx == NULL) {
		ASSERT(err != 0);
		goto done;
	}
	ASSERT(err == 0);
	uuid_copy(*nx_uuid, nx->nx_uuid);

done:
	/* release extra ref from nx_create */
	if (nx != NULL) {
		(void) nx_release(nx);
	}

	return err;
}
1081
/*
 * Allocate a netif nexus instance together with its backing ifnet.
 * On success, returns the embryonic-then-attached ifnet in *pifp (the
 * reference from ifnet_allocate_extended() is transferred to the caller)
 * and the nexus instance UUID in *nx_uuid.  On failure every partially
 * created object (nexus, ifnet) is unwound here.
 */
errno_t
kern_nexus_controller_alloc_net_provider_instance(
    const nexus_controller_t ncd, const uuid_t prov_uuid, const void *nx_ctx,
    nexus_ctx_release_fn_t nx_ctx_release, uuid_t *nx_uuid,
    const struct kern_nexus_net_init *init, struct ifnet **pifp)
{
	struct kern_nexus *nx = NULL;
	struct ifnet *ifp = NULL;
	struct nxctl *nxctl;
	boolean_t nxctl_locked = FALSE;	/* tracks NXCTL lock across gotos */
	errno_t err = 0;

	/* basic argument validation; init must be the current version */
	if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
	    nx_uuid == NULL || init == NULL ||
	    init->nxneti_version != KERN_NEXUS_NET_CURRENT_VERSION ||
	    init->nxneti_eparams == NULL || pifp == NULL) {
		err = EINVAL;
		goto done;
	}

	/*
	 * Skywalk native interface doesn't support legacy model.
	 */
	if ((init->nxneti_eparams->start != NULL) ||
	    (init->nxneti_eparams->flags & IFNET_INIT_LEGACY) ||
	    (init->nxneti_eparams->flags & IFNET_INIT_INPUT_POLL)) {
		err = EINVAL;
		goto done;
	}

	/* create an embryonic ifnet */
	err = ifnet_allocate_extended(init->nxneti_eparams, &ifp);
	if (err != 0) {
		goto done;
	}

	nxctl = ncd->ncd_nxctl;
	NXCTL_LOCK(nxctl);
	nxctl_locked = TRUE;

	/* instantiate the netif nexus with the caller-supplied pools */
	nx = nx_create(nxctl, prov_uuid, NEXUS_TYPE_NET_IF, nx_ctx,
	    nx_ctx_release, init->nxneti_tx_pbufpool, init->nxneti_rx_pbufpool,
	    &err);
	if (nx == NULL) {
		ASSERT(err != 0);
		goto done;
	}

	/* logical-link providers must supply a default llink config */
	if (NX_LLINK_PROV(nx)) {
		if (init->nxneti_llink == NULL) {
			SK_ERR("logical link configuration required");
			err = EINVAL;
			goto done;
		}
		err = nx_netif_default_llink_config(NX_NETIF_PRIVATE(nx),
		    init->nxneti_llink);
		if (err != 0) {
			goto done;
		}
	}

	/* prepare this ifnet instance if needed */
	if (init->nxneti_prepare != NULL) {
		err = init->nxneti_prepare(nx, ifp);
		if (err != 0) {
			goto done;
		}
	}

	/* attach embryonic ifnet to nexus */
	err = _kern_nexus_ifattach(nxctl, nx->nx_uuid, ifp, NULL, FALSE, NULL);

	if (err != 0) {
		goto done;
	}

	/*
	 * Finalize the ifnet attach; the controller lock must be dropped
	 * before calling ifnet_attach().
	 */
	ASSERT(nxctl_locked);
	NXCTL_UNLOCK(nxctl);
	nxctl_locked = FALSE;

	err = ifnet_attach(ifp, init->nxneti_lladdr);
	if (err != 0) {
		goto done;
	}

	ASSERT(err == 0);
	/*
	 * Return ifnet reference held by ifnet_allocate_extended();
	 * caller is expected to retain this reference until its ifnet
	 * detach callback is called.
	 */
	*pifp = ifp;
	uuid_copy(*nx_uuid, nx->nx_uuid);

done:
	if (nxctl_locked) {
		NXCTL_UNLOCK(nxctl);
	}

	/* release extra ref from nx_create; close it as well on failure */
	if (nx != NULL) {
		SK_LOCK();
		if (err != 0) {
			(void) nx_close(nx, TRUE);
		}
		(void) nx_release_locked(nx);
		SK_UNLOCK();
	}
	/* on failure, drop the ifnet ref we would have handed to the caller */
	if (err != 0 && ifp != NULL) {
		ifnet_release(ifp);
	}

	return err;
}
1197
1198 errno_t
kern_nexus_controller_free_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid)1199 kern_nexus_controller_free_provider_instance(const nexus_controller_t ncd,
1200 const uuid_t nx_uuid)
1201 {
1202 errno_t err;
1203
1204 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1205 err = EINVAL;
1206 } else {
1207 struct nxctl *nxctl = ncd->ncd_nxctl;
1208 NXCTL_LOCK(nxctl);
1209 err = nx_destroy(nxctl, nx_uuid);
1210 NXCTL_UNLOCK(nxctl);
1211 }
1212 return err;
1213 }
1214
1215 errno_t
kern_nexus_controller_bind_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid,nexus_port_t * port,const pid_t pid,const uuid_t exec_uuid,const void * key,const uint32_t key_len,const uint32_t bind_flags)1216 kern_nexus_controller_bind_provider_instance(const nexus_controller_t ncd,
1217 const uuid_t nx_uuid, nexus_port_t *port, const pid_t pid,
1218 const uuid_t exec_uuid, const void *key, const uint32_t key_len,
1219 const uint32_t bind_flags)
1220 {
1221 struct nx_bind_req nbr;
1222 struct sockopt sopt;
1223 struct nxctl *nxctl;
1224 int err = 0;
1225
1226 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1227 port == NULL) {
1228 return EINVAL;
1229 }
1230
1231 __nexus_bind_req_prepare(&nbr, nx_uuid, *port, pid, exec_uuid,
1232 key, key_len, bind_flags);
1233
1234 bzero(&sopt, sizeof(sopt));
1235 sopt.sopt_dir = SOPT_SET;
1236 sopt.sopt_name = NXOPT_NEXUS_BIND;
1237 sopt.sopt_val = (user_addr_t)&nbr;
1238 sopt.sopt_valsize = sizeof(nbr);
1239 sopt.sopt_p = kernproc;
1240
1241 nxctl = ncd->ncd_nxctl;
1242 NXCTL_LOCK(nxctl);
1243 err = nxctl_set_opt(nxctl, &sopt);
1244 NXCTL_UNLOCK(nxctl);
1245
1246 if (err == 0) {
1247 *port = nbr.nb_port;
1248 }
1249
1250 return err;
1251 }
1252
1253 errno_t
kern_nexus_controller_unbind_provider_instance(const nexus_controller_t ncd,const uuid_t nx_uuid,const nexus_port_t port)1254 kern_nexus_controller_unbind_provider_instance(const nexus_controller_t ncd,
1255 const uuid_t nx_uuid, const nexus_port_t port)
1256 {
1257 struct nx_unbind_req nbu;
1258 struct sockopt sopt;
1259 struct nxctl *nxctl;
1260 int err = 0;
1261
1262 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid)) {
1263 return EINVAL;
1264 }
1265
1266 __nexus_unbind_req_prepare(&nbu, nx_uuid, port);
1267
1268 bzero(&sopt, sizeof(sopt));
1269 sopt.sopt_dir = SOPT_SET;
1270 sopt.sopt_name = NXOPT_NEXUS_UNBIND;
1271 sopt.sopt_val = (user_addr_t)&nbu;
1272 sopt.sopt_valsize = sizeof(nbu);
1273 sopt.sopt_p = kernproc;
1274
1275 nxctl = ncd->ncd_nxctl;
1276 NXCTL_LOCK(nxctl);
1277 err = nxctl_set_opt(nxctl, &sopt);
1278 NXCTL_UNLOCK(nxctl);
1279
1280 return err;
1281 }
1282
1283 errno_t
kern_nexus_controller_read_provider_attr(const nexus_controller_t ncd,const uuid_t prov_uuid,nexus_attr_t nxa)1284 kern_nexus_controller_read_provider_attr(const nexus_controller_t ncd,
1285 const uuid_t prov_uuid, nexus_attr_t nxa)
1286 {
1287 struct nxprov_reg_ent nre;
1288 struct nxprov_params *p = &nre.npre_prov_params;
1289 struct sockopt sopt;
1290 struct nxctl *nxctl;
1291 int err = 0;
1292
1293 if (ncd == NULL || prov_uuid == NULL || uuid_is_null(prov_uuid) ||
1294 nxa == NULL) {
1295 return EINVAL;
1296 }
1297
1298 bzero(&nre, sizeof(nre));
1299 bcopy(prov_uuid, nre.npre_prov_uuid, sizeof(uuid_t));
1300
1301 bzero(&sopt, sizeof(sopt));
1302 sopt.sopt_dir = SOPT_GET;
1303 sopt.sopt_name = NXOPT_NEXUS_PROV_ENTRY;
1304 sopt.sopt_val = (user_addr_t)&nre;
1305 sopt.sopt_valsize = sizeof(nre);
1306 sopt.sopt_p = kernproc;
1307
1308 nxctl = ncd->ncd_nxctl;
1309 NXCTL_LOCK(nxctl);
1310 err = nxctl_get_opt(nxctl, &sopt);
1311 NXCTL_UNLOCK(nxctl);
1312
1313 if (err == 0) {
1314 __nexus_attr_from_params(nxa, p);
1315 }
1316
1317 return err;
1318 }
1319
/*
 * Destroy a nexus controller handle: tear down the underlying nxctl
 * first, then free the controller descriptor itself.  NULL is a no-op.
 */
void
kern_nexus_controller_destroy(nexus_controller_t ncd)
{
	struct nxctl *nxctl;

	if (ncd == NULL) {
		return;
	}

	/* detach the nxctl from the descriptor before destroying it */
	nxctl = ncd->ncd_nxctl;
	ASSERT(nxctl != NULL);
	ncd->ncd_nxctl = NULL;
	nxctl_dtor(nxctl);

	ncd_free(ncd);
}
1336
/* Return the opaque context pointer supplied when the nexus was created. */
void *
kern_nexus_get_context(const kern_nexus_t nx)
{
	return nx->nx_ctx;
}
1342
/* Stop the given nexus; nx_stop() requires the global SK_LOCK. */
void
kern_nexus_stop(const kern_nexus_t nx)
{
	SK_LOCK();
	nx_stop(nx);
	SK_UNLOCK();
}
1350
1351 errno_t
kern_nexus_get_pbufpool(const kern_nexus_t nx,kern_pbufpool_t * ptx_pp,kern_pbufpool_t * prx_pp)1352 kern_nexus_get_pbufpool(const kern_nexus_t nx, kern_pbufpool_t *ptx_pp,
1353 kern_pbufpool_t *prx_pp)
1354 {
1355 kern_pbufpool_t tpp = NULL, rpp = NULL;
1356 int err = 0;
1357
1358 if (ptx_pp == NULL && prx_pp == NULL) {
1359 return EINVAL;
1360 }
1361
1362 if (NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info == NULL) {
1363 err = ENOTSUP;
1364 } else {
1365 err = NX_DOM_PROV(nx)->nxdom_prov_nx_mem_info(nx, &tpp, &rpp);
1366 }
1367
1368 if (ptx_pp != NULL) {
1369 *ptx_pp = tpp;
1370 }
1371 if (prx_pp != NULL) {
1372 *prx_pp = rpp;
1373 }
1374
1375 return err;
1376 }
1377
/*
 * Attach either an ifnet (ifp != NULL) or another nexus instance
 * (nx_uuid_attachee != NULL) to the nexus identified by nx_uuid, via a
 * NXCFG_CMD_ATTACH config request.  Exactly one of ifp/nx_uuid_attachee
 * must be supplied.  On success, the resulting interface-attachment UUID
 * is optionally returned through nx_if_uuid.
 *
 * The caller must hold the controller (NXCTL) lock.
 */
static int
_kern_nexus_ifattach(struct nxctl *nxctl, const uuid_t nx_uuid,
    struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
    uuid_t *nx_if_uuid)
{
	struct nx_cfg_req ncr;
	struct nx_spec_req nsr;
	struct sockopt sopt;
	int err = 0;

	NXCTL_LOCK_ASSERT_HELD(nxctl);

	if (nx_uuid == NULL || uuid_is_null(nx_uuid)) {
		return EINVAL;
	}

	bzero(&nsr, sizeof(nsr));
	if (ifp != NULL) {
		/* ifnet attach: an attachee UUID must not also be given */
		if (nx_uuid_attachee != NULL) {
			return EINVAL;
		}

		nsr.nsr_flags = NXSPECREQ_IFP;
		nsr.nsr_ifp = ifp;
	} else {
		/* nexus-to-nexus attach: the attachee UUID is mandatory */
		if (nx_uuid_attachee == NULL) {
			return EINVAL;
		}

		nsr.nsr_flags = NXSPECREQ_UUID;
		if (host) {
			/* request attachment to the host-side port */
			nsr.nsr_flags |= NXSPECREQ_HOST;
		}

		uuid_copy(nsr.nsr_uuid, nx_uuid_attachee);
	}
	__nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_ATTACH,
	    &nsr, sizeof(nsr));

	/* issue the config request as a set-option from the kernel */
	bzero(&sopt, sizeof(sopt));
	sopt.sopt_dir = SOPT_SET;
	sopt.sopt_name = NXOPT_NEXUS_CONFIG;
	sopt.sopt_val = (user_addr_t)&ncr;
	sopt.sopt_valsize = sizeof(ncr);
	sopt.sopt_p = kernproc;

	err = nxctl_set_opt(nxctl, &sopt);
	if (err == 0 && nx_if_uuid != NULL) {
		/* return the attachment UUID filled in by the nexus */
		uuid_copy(*nx_if_uuid, nsr.nsr_if_uuid);
	}

	return err;
}
1431
1432 int
kern_nexus_ifattach(nexus_controller_t ncd,const uuid_t nx_uuid,struct ifnet * ifp,const uuid_t nx_uuid_attachee,boolean_t host,uuid_t * nx_if_uuid)1433 kern_nexus_ifattach(nexus_controller_t ncd, const uuid_t nx_uuid,
1434 struct ifnet *ifp, const uuid_t nx_uuid_attachee, boolean_t host,
1435 uuid_t *nx_if_uuid)
1436 {
1437 struct nxctl *nxctl;
1438 int err = 0;
1439
1440 if (ncd == NULL) {
1441 return EINVAL;
1442 }
1443
1444 nxctl = ncd->ncd_nxctl;
1445 ASSERT(nxctl != NULL);
1446 NXCTL_LOCK(nxctl);
1447 err = _kern_nexus_ifattach(nxctl, nx_uuid, ifp, nx_uuid_attachee,
1448 host, nx_if_uuid);
1449 NXCTL_UNLOCK(nxctl);
1450
1451 return err;
1452 }
1453
1454 int
kern_nexus_ifdetach(const nexus_controller_t ncd,const uuid_t nx_uuid,const uuid_t nx_if_uuid)1455 kern_nexus_ifdetach(const nexus_controller_t ncd,
1456 const uuid_t nx_uuid, const uuid_t nx_if_uuid)
1457 {
1458 struct nx_cfg_req ncr;
1459 struct nx_spec_req nsr;
1460 struct sockopt sopt;
1461 struct nxctl *nxctl;
1462 int err = 0;
1463
1464 if (ncd == NULL || nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1465 nx_if_uuid == NULL || uuid_is_null(nx_if_uuid)) {
1466 return EINVAL;
1467 }
1468
1469 bzero(&nsr, sizeof(nsr));
1470 uuid_copy(nsr.nsr_if_uuid, nx_if_uuid);
1471
1472 __nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_DETACH,
1473 &nsr, sizeof(nsr));
1474
1475 bzero(&sopt, sizeof(sopt));
1476 sopt.sopt_dir = SOPT_SET;
1477 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1478 sopt.sopt_val = (user_addr_t)&ncr;
1479 sopt.sopt_valsize = sizeof(ncr);
1480 sopt.sopt_p = kernproc;
1481
1482 nxctl = ncd->ncd_nxctl;
1483 NXCTL_LOCK(nxctl);
1484 err = nxctl_set_opt(nxctl, &sopt);
1485 NXCTL_UNLOCK(nxctl);
1486
1487 return err;
1488 }
1489
1490 int
kern_nexus_get_netif_instance(struct ifnet * ifp,uuid_t nx_uuid)1491 kern_nexus_get_netif_instance(struct ifnet *ifp, uuid_t nx_uuid)
1492 {
1493 struct nexus_netif_adapter *if_na;
1494 int err = 0;
1495
1496 SK_LOCK();
1497 if_na = ifp->if_na;
1498 if (if_na != NULL) {
1499 uuid_copy(nx_uuid, if_na->nifna_up.na_nx->nx_uuid);
1500 } else {
1501 err = ENXIO;
1502 }
1503 SK_UNLOCK();
1504 if (err != 0) {
1505 uuid_clear(nx_uuid);
1506 }
1507
1508 return err;
1509 }
1510
1511 int
kern_nexus_get_flowswitch_instance(struct ifnet * ifp,uuid_t nx_uuid)1512 kern_nexus_get_flowswitch_instance(struct ifnet *ifp, uuid_t nx_uuid)
1513 {
1514 struct nexus_netif_adapter *if_na;
1515 struct nx_flowswitch *fsw = NULL;
1516 int err = 0;
1517
1518 SK_LOCK();
1519 if_na = ifp->if_na;
1520 if (if_na != NULL) {
1521 fsw = ifp->if_na->nifna_netif->nif_fsw;
1522 }
1523 if (fsw != NULL) {
1524 uuid_copy(nx_uuid, fsw->fsw_nx->nx_uuid);
1525 } else {
1526 err = ENXIO;
1527 }
1528 SK_UNLOCK();
1529 if (err != 0) {
1530 uuid_clear(nx_uuid);
1531 }
1532
1533 return err;
1534 }
1535
/* Walker callback: register the flowswitch netagent for one nexus. */
static void
kern_nexus_netagent_add(struct kern_nexus *nx, void *arg0)
{
	(void)arg0;
	(void) nx_fsw_netagent_add(nx);
}
1542
/* Walker callback: deregister the flowswitch netagent for one nexus. */
static void
kern_nexus_netagent_remove(struct kern_nexus *nx, void *arg0)
{
	(void)arg0;
	(void) nx_fsw_netagent_remove(nx);
}
1549
/* Walker callback: refresh the flowswitch netagent for one nexus. */
static void
kern_nexus_netagent_update(struct kern_nexus *nx, void *arg0)
{
	(void)arg0;
	(void) nx_fsw_netagent_update(nx);
}
1556
/* Walk all nexus instances and register their flowswitch netagents. */
void
kern_nexus_register_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_add, NULL, FALSE);
}
1562
/* Walk all nexus instances and deregister their flowswitch netagents. */
void
kern_nexus_deregister_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_remove, NULL, FALSE);
}
1568
/* Walk all nexus instances and refresh their flowswitch netagents. */
void
kern_nexus_update_netagents(void)
{
	kern_nexus_walktree(kern_nexus_netagent_update, NULL, FALSE);
}
1574
1575 static int
_interface_add_remove_netagent(struct ifnet * ifp,bool add)1576 _interface_add_remove_netagent(struct ifnet *ifp, bool add)
1577 {
1578 struct nexus_netif_adapter *if_na;
1579 int err = ENXIO;
1580
1581 SK_LOCK();
1582 if_na = ifp->if_na;
1583 if (if_na != NULL) {
1584 struct nx_flowswitch *fsw;
1585
1586 fsw = if_na->nifna_netif->nif_fsw;
1587 if (fsw != NULL) {
1588 if (add) {
1589 err = nx_fsw_netagent_add(fsw->fsw_nx);
1590 } else {
1591 err = nx_fsw_netagent_remove(fsw->fsw_nx);
1592 }
1593 }
1594 }
1595 SK_UNLOCK();
1596 return err;
1597 }
1598
/* Register the netagent of the flowswitch attached to ifp. */
int
kern_nexus_interface_add_netagent(struct ifnet *ifp)
{
	return _interface_add_remove_netagent(ifp, true);
}
1604
/* Deregister the netagent of the flowswitch attached to ifp. */
int
kern_nexus_interface_remove_netagent(struct ifnet *ifp)
{
	return _interface_add_remove_netagent(ifp, false);
}
1610
1611 int
kern_nexus_set_netif_input_tbr_rate(struct ifnet * ifp,uint64_t rate)1612 kern_nexus_set_netif_input_tbr_rate(struct ifnet *ifp, uint64_t rate)
1613 {
1614 /* input tbr is only functional with active netif attachment */
1615 if (ifp->if_na == NULL) {
1616 if (rate != 0) {
1617 return EINVAL;
1618 } else {
1619 return 0;
1620 }
1621 }
1622
1623 ifp->if_na->nifna_netif->nif_input_rate = rate;
1624 return 0;
1625 }
1626
1627 int
kern_nexus_set_if_netem_params(const nexus_controller_t ncd,const uuid_t nx_uuid,void * data,size_t data_len)1628 kern_nexus_set_if_netem_params(const nexus_controller_t ncd,
1629 const uuid_t nx_uuid, void *data, size_t data_len)
1630 {
1631 struct nx_cfg_req ncr;
1632 struct sockopt sopt;
1633 struct nxctl *nxctl;
1634 int err = 0;
1635
1636 if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1637 data_len < sizeof(struct if_netem_params)) {
1638 return EINVAL;
1639 }
1640
1641 __nexus_config_req_prepare(&ncr, nx_uuid, NXCFG_CMD_NETEM,
1642 data, data_len);
1643 bzero(&sopt, sizeof(sopt));
1644 sopt.sopt_dir = SOPT_SET;
1645 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1646 sopt.sopt_val = (user_addr_t)&ncr;
1647 sopt.sopt_valsize = sizeof(ncr);
1648 sopt.sopt_p = kernproc;
1649
1650 nxctl = ncd->ncd_nxctl;
1651 NXCTL_LOCK(nxctl);
1652 err = nxctl_set_opt(nxctl, &sopt);
1653 NXCTL_UNLOCK(nxctl);
1654
1655 return err;
1656 }
1657
1658 static int
_kern_nexus_flow_config(const nexus_controller_t ncd,const uuid_t nx_uuid,const nxcfg_cmd_t cmd,void * data,size_t data_len)1659 _kern_nexus_flow_config(const nexus_controller_t ncd, const uuid_t nx_uuid,
1660 const nxcfg_cmd_t cmd, void *data, size_t data_len)
1661 {
1662 struct nx_cfg_req ncr;
1663 struct sockopt sopt;
1664 struct nxctl *nxctl;
1665 int err = 0;
1666
1667 if (nx_uuid == NULL || uuid_is_null(nx_uuid) ||
1668 data_len < sizeof(struct nx_flow_req)) {
1669 return EINVAL;
1670 }
1671
1672 __nexus_config_req_prepare(&ncr, nx_uuid, cmd, data, data_len);
1673
1674 bzero(&sopt, sizeof(sopt));
1675 sopt.sopt_dir = SOPT_SET;
1676 sopt.sopt_name = NXOPT_NEXUS_CONFIG;
1677 sopt.sopt_val = (user_addr_t)&ncr;
1678 sopt.sopt_valsize = sizeof(ncr);
1679 sopt.sopt_p = kernproc;
1680
1681 nxctl = ncd->ncd_nxctl;
1682 NXCTL_LOCK(nxctl);
1683 err = nxctl_set_opt(nxctl, &sopt);
1684 NXCTL_UNLOCK(nxctl);
1685
1686 return err;
1687 }
1688
/* Add a flow (struct nx_flow_req in data) to the given nexus. */
int
kern_nexus_flow_add(const nexus_controller_t ncd, const uuid_t nx_uuid,
    void *data, size_t data_len)
{
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_ADD, data,
	           data_len);
}
1696
/* Delete a flow (struct nx_flow_req in data) from the given nexus. */
int
kern_nexus_flow_del(const nexus_controller_t ncd, const uuid_t nx_uuid,
    void *data, size_t data_len)
{
	return _kern_nexus_flow_config(ncd, nx_uuid, NXCFG_CMD_FLOW_DEL, data,
	           data_len);
}
1704
/* Zone-allocate a zero-filled domain provider; SK_LOCK must be held. */
static struct kern_nexus_domain_provider *
nxdom_prov_alloc(zalloc_flags_t how)
{
	SK_LOCK_ASSERT_HELD();

	return zalloc_flags(nxdom_prov_zone, how | Z_ZERO);
}
1712
/*
 * Final teardown of a domain provider once its refcount hits zero.
 * Must be called with SK_LOCK held, and only for a provider that is no
 * longer attached or detaching.  The backing memory is returned to the
 * zone only for externally registered (NXDOMPROVF_EXT) providers;
 * built-in providers are statically allocated and are merely reset.
 */
static void
nxdom_prov_free(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK_ASSERT_HELD();

	ASSERT(nxdom_prov->nxdom_prov_refcnt == 0);
	ASSERT(!(nxdom_prov->nxdom_prov_flags &
	    (NXDOMPROVF_ATTACHED | NXDOMPROVF_DETACHING)));

	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_INITIALIZED) {
		/*
		 * Tell the domain provider that we're done with this
		 * instance, and it is now free to go away.
		 */
		if (nxdom_prov->nxdom_prov_fini != NULL) {
			nxdom_prov->nxdom_prov_fini(nxdom_prov);
		}
		nxdom_prov->nxdom_prov_flags &= ~NXDOMPROVF_INITIALIZED;
	}
	/* scrub identity so a stale pointer is easier to spot */
	uuid_clear(nxdom_prov->nxdom_prov_uuid);
	nxdom_prov->nxdom_prov_dom = NULL;

	SK_DF(SK_VERB_MEM, "nxdom_prov 0x%llx %s", SK_KVA(nxdom_prov),
	    ((nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) ?
	    "FREE" : "DESTROY"));
	if (nxdom_prov->nxdom_prov_flags & NXDOMPROVF_EXT) {
		zfree(nxdom_prov_zone, nxdom_prov);
	}
}
1742
/* Take a reference on the domain provider; SK_LOCK must be held. */
void
nxdom_prov_retain_locked(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK_ASSERT_HELD();

	nxdom_prov->nxdom_prov_refcnt++;
	/* post-increment check catches refcount wraparound */
	ASSERT(nxdom_prov->nxdom_prov_refcnt != 0);
}
1751
/* Take a reference on the domain provider, acquiring SK_LOCK. */
void
nxdom_prov_retain(struct kern_nexus_domain_provider *nxdom_prov)
{
	SK_LOCK();
	nxdom_prov_retain_locked(nxdom_prov);
	SK_UNLOCK();
}
1759
1760 static int
nxdom_prov_params_default(struct kern_nexus_domain_provider * nxdom_prov,const uint32_t req,const struct nxprov_params * nxp0,struct nxprov_params * nxp,struct skmem_region_params srp[SKMEM_REGIONS])1761 nxdom_prov_params_default(struct kern_nexus_domain_provider *nxdom_prov,
1762 const uint32_t req, const struct nxprov_params *nxp0,
1763 struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS])
1764 {
1765 struct nxdom *nxdom = nxdom_prov->nxdom_prov_dom;
1766
1767 return nxprov_params_adjust(nxdom_prov, req, nxp0, nxp, srp,
1768 nxdom, nxdom, nxdom, NULL);
1769 }
1770
/*
 * Validate and normalize a provider registration: populate nxp with
 * defaults derived from the registration request and the provider's
 * domain, seed srp[] with the default region parameters, then let the
 * domain provider (or the default adjuster) bound everything.
 */
int
nxdom_prov_validate_params(struct kern_nexus_domain_provider *nxdom_prov,
    const struct nxprov_reg *reg, struct nxprov_params *nxp,
    struct skmem_region_params srp[SKMEM_REGIONS], const uint32_t oflags)
{
	const struct nxprov_params *nxp0 = &reg->nxpreg_params;
	const uint32_t req = reg->nxpreg_requested;
	int i, err = 0;

	/* caller has already checked version and name length */
	ASSERT(reg->nxpreg_version == NXPROV_REG_CURRENT_VERSION &&
	    nxp0->nxp_namelen != 0 &&
	    nxp0->nxp_namelen <= sizeof(nexus_name_t));

	/* fill in with default values and let the nexus override them */
	bzero(nxp, sizeof(*nxp));
	bcopy(&nxp0->nxp_name, &nxp->nxp_name, sizeof(nxp->nxp_name));
	/* force NUL termination regardless of the caller's buffer */
	nxp->nxp_name[sizeof(nxp->nxp_name) - 1] = '\0';
	nxp->nxp_namelen = nxp0->nxp_namelen;
	nxp->nxp_type = nxp0->nxp_type;
	nxp->nxp_md_type = nxdom_prov->nxdom_prov_dom->nxdom_md_type;
	nxp->nxp_md_subtype = nxdom_prov->nxdom_prov_dom->nxdom_md_subtype;
	nxp->nxp_flags = (nxp0->nxp_flags & NXPF_MASK);
	nxp->nxp_flags |= oflags;       /* override */
	nxp->nxp_format = nxp0->nxp_format;
	nxp->nxp_ifindex = nxp0->nxp_ifindex;
	nxp->nxp_reject_on_close = nxp0->nxp_reject_on_close;

	/* inherit default region parameters */
	for (i = 0; i < SKMEM_REGIONS; i++) {
		srp[i] = *skmem_get_default(i);
	}

	/* provider-specific adjustment, falling back to the default */
	if (nxdom_prov->nxdom_prov_params != NULL) {
		err = nxdom_prov->nxdom_prov_params(nxdom_prov, req, nxp0,
		    nxp, srp);
	} else {
		err = nxdom_prov_params_default(nxdom_prov, req, nxp0,
		    nxp, srp);
	}
	return err;
}
1812
/*
 * Drop a reference on the domain provider; frees it on the last drop.
 * Returns TRUE if this call released the final reference.  SK_LOCK
 * must be held.
 */
boolean_t
nxdom_prov_release_locked(struct kern_nexus_domain_provider *nxdom_prov)
{
	/* capture the pre-decrement count; nxdom_prov may be freed below */
	int oldref = nxdom_prov->nxdom_prov_refcnt;

	SK_LOCK_ASSERT_HELD();

	ASSERT(nxdom_prov->nxdom_prov_refcnt != 0);
	if (--nxdom_prov->nxdom_prov_refcnt == 0) {
		nxdom_prov_free(nxdom_prov);
	}

	return oldref == 1;
}
1827
/*
 * Drop a reference on the domain provider, acquiring SK_LOCK.
 * Returns TRUE if this call released the final reference.
 */
boolean_t
nxdom_prov_release(struct kern_nexus_domain_provider *nxdom_prov)
{
	boolean_t lastref;

	SK_LOCK();
	lastref = nxdom_prov_release_locked(nxdom_prov);
	SK_UNLOCK();

	return lastref;
}
1839
/*
 * Force *v into [lo, hi] and return the bounded value.  The default is
 * first clamped into the same range; a value below lo is bumped up to
 * that (clamped) default, while a value above hi is clamped down to hi.
 * Note the asymmetry: too-small values get the default, too-large
 * values get the maximum.  msg is used only for SK_LOG diagnostics.
 */
static uint32_t
nxprov_bound_var(uint32_t *v, uint32_t dflt, uint32_t lo, uint32_t hi,
    const char *msg)
{
#pragma unused(msg)
	uint32_t oldv = *v;
	const char *op = NULL;

	/* clamp the default itself into [lo, hi] first */
	if (dflt < lo) {
		dflt = lo;
	}
	if (dflt > hi) {
		dflt = hi;
	}
	if (oldv < lo) {
		*v = dflt;
		op = "bump";
	} else if (oldv > hi) {
		*v = hi;
		op = "clamp";
	}
#if SK_LOG
	if (op != NULL && msg != NULL) {
		SK_ERR("%s %s to %u (was %u)", op, msg, *v, oldv);
	}
#endif /* SK_LOG */
	return *v;
}
1868
/*
 * Bound one nexus parameter into its domain [min, max] range.  The
 * caller's requested value is honored only when the matching NXPREQ_*
 * bit is set in `req'; otherwise the domain default is used.  If the
 * bounded result comes out smaller than what was asked for, the
 * request cannot be satisfied and we fail with ENOMEM.
 * Expects req, nxp0, nxp, nxdom_def/min/max, err and an `error' label
 * in the expansion context.
 */
#define NXPROV_PARAMS_ADJUST(flag, param) do { \
	uint32_t _v0, _v; \
	if (req & (flag)) \
	        _v = nxp0->nxp_##param; \
	else \
	        _v = NXDOM_DEF(nxdom_def, param); \
	_v0 = _v; \
	if (nxprov_bound_var(&_v, NXDOM_DEF(nxdom_def, param), \
	    NXDOM_MIN(nxdom_min, param), NXDOM_MAX(nxdom_max, param), \
	    "nxp_" #param) < _v0) { \
	        err = ENOMEM; \
	        goto error; \
	} \
	nxp->nxp_##param = _v; \
} while (0)

/*
 * Overflow-checked multiply: *z = x * y, or record the failing line in
 * `overflowline' and jump to the local `error' label on overflow.
 */
#define MUL(x, y, z) do { \
	if (__builtin_mul_overflow((x), (y), (z))) { \
	        overflowline = __LINE__; \
	        goto error; \
	} \
} while (0)

/*
 * Overflow-checked add: *z = x + y, with the same error convention
 * as MUL() above.
 */
#define ADD(x, y, z) do { \
	if (__builtin_add_overflow((x), (y), (z))) { \
	        overflowline = __LINE__; \
	        goto error; \
	} \
} while (0)
1898
1899 int
nxprov_params_adjust(struct kern_nexus_domain_provider * nxdom_prov,const uint32_t req,const struct nxprov_params * nxp0,struct nxprov_params * nxp,struct skmem_region_params srp[SKMEM_REGIONS],const struct nxdom * nxdom_def,const struct nxdom * nxdom_min,const struct nxdom * nxdom_max,int (* adjust_fn)(const struct kern_nexus_domain_provider *,const struct nxprov_params *,struct nxprov_adjusted_params *))1900 nxprov_params_adjust(struct kern_nexus_domain_provider *nxdom_prov,
1901 const uint32_t req, const struct nxprov_params *nxp0,
1902 struct nxprov_params *nxp, struct skmem_region_params srp[SKMEM_REGIONS],
1903 const struct nxdom *nxdom_def, const struct nxdom *nxdom_min,
1904 const struct nxdom *nxdom_max,
1905 int (*adjust_fn)(const struct kern_nexus_domain_provider *,
1906 const struct nxprov_params *, struct nxprov_adjusted_params *))
1907 {
1908 uint32_t buf_cnt;
1909 uint32_t stats_size;
1910 uint32_t flowadv_max;
1911 uint32_t nexusadv_size;
1912 uint32_t capabs;
1913 uint32_t tx_rings, rx_rings, alloc_rings = 0, free_rings = 0, ev_rings = 0;
1914 uint32_t tx_slots, rx_slots, alloc_slots = 0, free_slots = 0, ev_slots = 0;
1915 uint32_t buf_size, max_buffers = 0;
1916 uint32_t tmp1, tmp2, tmp3, tmp4xpipes, tmpsumrings;
1917 uint32_t tmpsumall, tmp4xpipesplusrings;
1918 boolean_t md_magazines;
1919 int overflowline = 0;
1920 struct skmem_region_params *ubft_srp = NULL;
1921 struct skmem_region_params *kbft_srp = NULL;
1922 int err = 0;
1923
1924 NXPROV_PARAMS_ADJUST(NXPREQ_TX_RINGS, tx_rings);
1925 NXPROV_PARAMS_ADJUST(NXPREQ_RX_RINGS, rx_rings);
1926 NXPROV_PARAMS_ADJUST(NXPREQ_TX_SLOTS, tx_slots);
1927 NXPROV_PARAMS_ADJUST(NXPREQ_RX_SLOTS, rx_slots);
1928 NXPROV_PARAMS_ADJUST(NXPREQ_BUF_SIZE, buf_size);
1929 NXPROV_PARAMS_ADJUST(NXPREQ_STATS_SIZE, stats_size);
1930 NXPROV_PARAMS_ADJUST(NXPREQ_FLOWADV_MAX, flowadv_max);
1931 NXPROV_PARAMS_ADJUST(NXPREQ_NEXUSADV_SIZE, nexusadv_size);
1932 NXPROV_PARAMS_ADJUST(NXPREQ_PIPES, pipes);
1933 NXPROV_PARAMS_ADJUST(NXPREQ_EXTENSIONS, extensions);
1934 NXPROV_PARAMS_ADJUST(NXPREQ_MHINTS, mhints);
1935 NXPROV_PARAMS_ADJUST(NXPREQ_CAPABILITIES, capabilities);
1936 NXPROV_PARAMS_ADJUST(NXPREQ_QMAP, qmap);
1937 NXPROV_PARAMS_ADJUST(NXPREQ_MAX_FRAGS, max_frags);
1938
1939 capabs = NXDOM_DEF(nxdom_def, capabilities);
1940 if (req & NXPREQ_USER_CHANNEL) {
1941 if (nxp->nxp_flags & NXPF_USER_CHANNEL) {
1942 capabs |= NXPCAP_USER_CHANNEL;
1943 } else {
1944 capabs &= ~NXPCAP_USER_CHANNEL;
1945 }
1946 } else {
1947 if (capabs & NXPCAP_USER_CHANNEL) {
1948 nxp->nxp_flags |= NXPF_USER_CHANNEL;
1949 } else {
1950 nxp->nxp_flags &= ~NXPF_USER_CHANNEL;
1951 }
1952 }
1953
1954 if (NXDOM_MIN(nxdom_min, capabilities) != 0 &&
1955 !(capabs & NXDOM_MIN(nxdom_min, capabilities))) {
1956 SK_ERR("%s: caps 0x%b < min 0x%b",
1957 nxdom_prov->nxdom_prov_name, capabs, NXPCAP_BITS,
1958 NXDOM_MIN(nxdom_min, capabilities), NXPCAP_BITS);
1959 err = EINVAL;
1960 goto error;
1961 } else if (NXDOM_MAX(nxdom_max, capabilities) != 0 &&
1962 (capabs & ~NXDOM_MAX(nxdom_max, capabilities))) {
1963 SK_ERR("%s: caps 0x%b > max 0x%b",
1964 nxdom_prov->nxdom_prov_name, capabs, NXPCAP_BITS,
1965 NXDOM_MAX(nxdom_max, capabilities), NXPCAP_BITS);
1966 err = EINVAL;
1967 goto error;
1968 }
1969
1970 stats_size = nxp->nxp_stats_size;
1971 flowadv_max = nxp->nxp_flowadv_max;
1972 nexusadv_size = nxp->nxp_nexusadv_size;
1973 tx_rings = nxp->nxp_tx_rings;
1974 rx_rings = nxp->nxp_rx_rings;
1975 tx_slots = nxp->nxp_tx_slots;
1976 rx_slots = nxp->nxp_rx_slots;
1977 buf_size = nxp->nxp_buf_size;
1978
1979 ASSERT((srp[SKMEM_REGION_UMD].srp_cflags & SKMEM_REGION_CR_NOMAGAZINES) ==
1980 (srp[SKMEM_REGION_RXKMD].srp_cflags & SKMEM_REGION_CR_NOMAGAZINES));
1981 md_magazines = !(srp[SKMEM_REGION_UMD].srp_cflags &
1982 SKMEM_REGION_CR_NOMAGAZINES);
1983
1984 if (adjust_fn != NULL) {
1985 struct nxprov_adjusted_params adj = {
1986 .adj_md_subtype = &nxp->nxp_md_subtype,
1987 .adj_md_magazines = &md_magazines,
1988 .adj_stats_size = &stats_size,
1989 .adj_flowadv_max = &flowadv_max,
1990 .adj_nexusadv_size = &nexusadv_size,
1991 .adj_caps = &capabs,
1992 .adj_tx_rings = &tx_rings,
1993 .adj_rx_rings = &rx_rings,
1994 .adj_tx_slots = &tx_slots,
1995 .adj_rx_slots = &rx_slots,
1996 .adj_alloc_rings = &alloc_rings,
1997 .adj_free_rings = &free_rings,
1998 .adj_alloc_slots = &alloc_slots,
1999 .adj_free_slots = &free_slots,
2000 .adj_buf_size = &buf_size,
2001 .adj_buf_srp = &srp[SKMEM_REGION_BUF],
2002 .adj_max_frags = &nxp->nxp_max_frags,
2003 .adj_event_rings = &ev_rings,
2004 .adj_event_slots = &ev_slots,
2005 .adj_max_buffers = &max_buffers,
2006 };
2007 err = adjust_fn(nxdom_prov, nxp, &adj);
2008 if (err != 0) {
2009 goto error;
2010 }
2011
2012 ASSERT(capabs >= NXDOM_MIN(nxdom_min, capabilities));
2013 ASSERT(capabs <= NXDOM_MAX(nxdom_max, capabilities));
2014 }
2015
2016 if (nxp->nxp_max_frags > UINT16_MAX) {
2017 SK_ERR("invalid configuration for max frags %d",
2018 nxp->nxp_max_frags);
2019 err = EINVAL;
2020 }
2021
2022 if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
2023 if (tx_rings != rx_rings) {
2024 SK_ERR("invalid configuration: {rx,tx} rings must be"
2025 "in pairs for user pipe rx_rings(%d) tx_rings(%d)",
2026 rx_rings, tx_rings);
2027 err = EINVAL;
2028 }
2029 } else {
2030 if (nxp->nxp_pipes != 0) {
2031 SK_ERR("invalid configuration: pipe configuration is"
2032 "only valid for user pipe nexus, type %d, pipes %d",
2033 nxp->nxp_type, nxp->nxp_pipes);
2034 err = EINVAL;
2035 }
2036 }
2037 if (err != 0) {
2038 goto error;
2039 }
2040
2041 /* leading and trailing guard pages (if applicable) */
2042 if (sk_guard) {
2043 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = SKMEM_PAGE_SIZE;
2044 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = sk_headguard_sz;
2045 skmem_region_params_config(&srp[SKMEM_REGION_GUARD_HEAD]);
2046 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = SKMEM_PAGE_SIZE;
2047 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = sk_tailguard_sz;
2048 skmem_region_params_config(&srp[SKMEM_REGION_GUARD_TAIL]);
2049 } else {
2050 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size = 0;
2051 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt = 0;
2052 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size = 0;
2053 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt = 0;
2054 }
2055
2056 /* update to the adjusted/configured values */
2057 nxp->nxp_buf_size = buf_size;
2058 nxp->nxp_tx_slots = tx_slots;
2059 nxp->nxp_rx_slots = rx_slots;
2060
2061 SK_D("nxdom \"%s\" (0x%llx) type %d",
2062 nxdom_prov->nxdom_prov_dom->nxdom_name,
2063 SK_KVA(nxdom_prov->nxdom_prov_dom),
2064 nxdom_prov->nxdom_prov_dom->nxdom_type);
2065 SK_D("nxp \"%s\" (0x%llx) flags 0x%b",
2066 nxp->nxp_name, SK_KVA(nxp), nxp->nxp_flags, NXPF_BITS);
2067 SK_D(" req 0x%b rings %u/%u/%u/%u/%u slots %u/%u/%u/%u/%u buf %u "
2068 "type %u subtype %u stats %u flowadv_max %u nexusadv_size %u "
2069 "capabs 0x%b pipes %u extensions %u max_frags %u headguard %u "
2070 "tailguard %u", req, NXPREQ_BITS, tx_rings, rx_rings, alloc_rings,
2071 free_rings, ev_rings, tx_slots, rx_slots, alloc_slots, free_slots,
2072 ev_slots, nxp->nxp_buf_size, nxp->nxp_md_type, nxp->nxp_md_subtype,
2073 stats_size, flowadv_max, nexusadv_size, capabs, NXPCAP_BITS,
2074 nxp->nxp_pipes, nxp->nxp_extensions, nxp->nxp_max_frags,
2075 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_size *
2076 srp[SKMEM_REGION_GUARD_HEAD].srp_r_obj_cnt,
2077 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_size *
2078 srp[SKMEM_REGION_GUARD_TAIL].srp_r_obj_cnt);
2079
2080 /*
2081 * tmp4xpipes = 4 * nxp->nxp_pipes
2082 */
2083 MUL(4, nxp->nxp_pipes, &tmp4xpipes);
2084
2085 /*
2086 * tmp4xpipesplusrings = tx_rings + (4 * nxp->nxp_pipes)
2087 */
2088 VERIFY((tmp4xpipes == 0) || (rx_rings == tx_rings));
2089 ADD(tx_rings, tmp4xpipes, &tmp4xpipesplusrings);
2090
2091 /*
2092 * tmpsumrings = tx_rings + rx_rings + alloc_rings + free_rings + ev_rings
2093 */
2094 ADD(tx_rings, rx_rings, &tmpsumrings);
2095 ADD(tmpsumrings, alloc_rings, &tmpsumrings);
2096 ADD(tmpsumrings, free_rings, &tmpsumrings);
2097 ADD(tmpsumrings, ev_rings, &tmpsumrings);
2098
2099 /*
2100 * tmpsumall = (tx_rings + rx_rings +
2101 * alloc_rings + free_rings + ev_rings + (4 * nxp->nxp_pipes))
2102 */
2103 ADD(tmpsumrings, tmp4xpipes, &tmpsumall);
2104
2105 /* possibly increase them to fit user request */
2106 VERIFY(CHANNEL_SCHEMA_SIZE(tmpsumrings) <= UINT32_MAX);
2107 srp[SKMEM_REGION_SCHEMA].srp_r_obj_size =
2108 (uint32_t)CHANNEL_SCHEMA_SIZE(tmpsumrings);
2109 /* worst case is one channel bound to each ring pair */
2110 srp[SKMEM_REGION_SCHEMA].srp_r_obj_cnt = tmp4xpipesplusrings;
2111
2112 skmem_region_params_config(&srp[SKMEM_REGION_SCHEMA]);
2113
2114 srp[SKMEM_REGION_RING].srp_r_obj_size =
2115 sizeof(struct __user_channel_ring);
2116 /* each pipe endpoint needs two tx rings and two rx rings */
2117 srp[SKMEM_REGION_RING].srp_r_obj_cnt = tmpsumall;
2118 skmem_region_params_config(&srp[SKMEM_REGION_RING]);
2119
2120 /*
2121 * For each pipe we only need the buffers for the "real" rings.
2122 * On the other end, the pipe ring dimension may be different from
2123 * the parent port ring dimension. As a compromise, we allocate twice
2124 * the space actually needed if the pipe rings were the same size as
2125 * the parent rings.
2126 *
2127 * buf_cnt = ((4 * nxp->nxp_pipes) + rx_rings) * rx_slots +
2128 * ((4 * nxp->nxp_pipes) + tx_rings) * tx_slots +
2129 * (ev_rings * ev_slots);
2130 */
2131 if (nxp->nxp_type == NEXUS_TYPE_USER_PIPE) {
2132 MUL(tmp4xpipesplusrings, rx_slots, &tmp1);
2133 MUL(tmp4xpipesplusrings, tx_slots, &tmp2);
2134 ASSERT(ev_rings == 0);
2135 tmp3 = 0;
2136 } else {
2137 MUL(rx_rings, rx_slots, &tmp1);
2138 MUL(tx_rings, tx_slots, &tmp2);
2139 MUL(ev_rings, ev_slots, &tmp3);
2140 }
2141 ADD(tmp1, tmp2, &buf_cnt);
2142 ADD(tmp3, buf_cnt, &buf_cnt);
2143
2144 if (nxp->nxp_max_frags > 1) {
2145 buf_cnt = MIN((((uint32_t)P2ROUNDUP(NX_MAX_AGGR_PKT_SIZE,
2146 nxp->nxp_buf_size) / nxp->nxp_buf_size) * buf_cnt),
2147 (buf_cnt * nxp->nxp_max_frags));
2148 }
2149
2150 if (max_buffers != 0) {
2151 buf_cnt = MIN(max_buffers, buf_cnt);
2152 }
2153
2154 /* enable/disable magazines layer on metadata regions */
2155 if (md_magazines) {
2156 srp[SKMEM_REGION_UMD].srp_cflags &=
2157 ~SKMEM_REGION_CR_NOMAGAZINES;
2158 srp[SKMEM_REGION_KMD].srp_cflags &=
2159 ~SKMEM_REGION_CR_NOMAGAZINES;
2160 } else {
2161 srp[SKMEM_REGION_UMD].srp_cflags |=
2162 SKMEM_REGION_CR_NOMAGAZINES;
2163 srp[SKMEM_REGION_KMD].srp_cflags |=
2164 SKMEM_REGION_CR_NOMAGAZINES;
2165 }
2166
2167 if (nxp->nxp_max_frags > 1) {
2168 kbft_srp = &srp[SKMEM_REGION_KBFT];
2169 kbft_srp->srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
2170 }
2171 if ((kbft_srp != NULL) && (nxp->nxp_flags & NXPF_USER_CHANNEL)) {
2172 ubft_srp = &srp[SKMEM_REGION_UBFT];
2173 ubft_srp->srp_cflags &= ~SKMEM_REGION_CR_NOMAGAZINES;
2174 }
2175
2176 /* # of metadata objects is same as the # of buffer objects */
2177 pp_regions_params_adjust(&srp[SKMEM_REGION_BUF], &srp[SKMEM_REGION_KMD],
2178 &srp[SKMEM_REGION_UMD], kbft_srp, ubft_srp, nxp->nxp_md_type,
2179 nxp->nxp_md_subtype, buf_cnt, (uint16_t)nxp->nxp_max_frags,
2180 nxp->nxp_buf_size, buf_cnt);
2181
2182 /* statistics region size */
2183 if (stats_size != 0) {
2184 srp[SKMEM_REGION_USTATS].srp_r_obj_size = stats_size;
2185 srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 1;
2186 skmem_region_params_config(&srp[SKMEM_REGION_USTATS]);
2187 } else {
2188 srp[SKMEM_REGION_USTATS].srp_r_obj_size = 0;
2189 srp[SKMEM_REGION_USTATS].srp_r_obj_cnt = 0;
2190 srp[SKMEM_REGION_USTATS].srp_c_obj_size = 0;
2191 srp[SKMEM_REGION_USTATS].srp_c_obj_cnt = 0;
2192 }
2193
2194 /* flow advisory region size */
2195 if (flowadv_max != 0) {
2196 MUL(sizeof(struct __flowadv_entry), flowadv_max, &tmp1);
2197 srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = tmp1;
2198 srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 1;
2199 skmem_region_params_config(&srp[SKMEM_REGION_FLOWADV]);
2200 } else {
2201 srp[SKMEM_REGION_FLOWADV].srp_r_obj_size = 0;
2202 srp[SKMEM_REGION_FLOWADV].srp_r_obj_cnt = 0;
2203 srp[SKMEM_REGION_FLOWADV].srp_c_obj_size = 0;
2204 srp[SKMEM_REGION_FLOWADV].srp_c_obj_cnt = 0;
2205 }
2206
2207 /* nexus advisory region size */
2208 if (nexusadv_size != 0) {
2209 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = nexusadv_size +
2210 sizeof(struct __kern_nexus_adv_metadata);
2211 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 1;
2212 skmem_region_params_config(&srp[SKMEM_REGION_NEXUSADV]);
2213 } else {
2214 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_size = 0;
2215 srp[SKMEM_REGION_NEXUSADV].srp_r_obj_cnt = 0;
2216 srp[SKMEM_REGION_NEXUSADV].srp_c_obj_size = 0;
2217 srp[SKMEM_REGION_NEXUSADV].srp_c_obj_cnt = 0;
2218 }
2219
2220 /* sysctls region is not applicable to nexus */
2221 srp[SKMEM_REGION_SYSCTLS].srp_r_obj_size = 0;
2222 srp[SKMEM_REGION_SYSCTLS].srp_r_obj_cnt = 0;
2223 srp[SKMEM_REGION_SYSCTLS].srp_c_obj_size = 0;
2224 srp[SKMEM_REGION_SYSCTLS].srp_c_obj_cnt = 0;
2225
2226 /*
2227 * Since the tx/alloc/event slots share the same region and cache,
2228 * we will use the same object size for both types of slots.
2229 */
2230 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size =
2231 (MAX(MAX(tx_slots, alloc_slots), ev_slots)) * SLOT_DESC_SZ;
2232 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt = tx_rings + alloc_rings + ev_rings;
2233 skmem_region_params_config(&srp[SKMEM_REGION_TXAKSD]);
2234
2235 /* USD and KSD objects share the same size and count */
2236 srp[SKMEM_REGION_TXAUSD].srp_r_obj_size =
2237 srp[SKMEM_REGION_TXAKSD].srp_r_obj_size;
2238 srp[SKMEM_REGION_TXAUSD].srp_r_obj_cnt =
2239 srp[SKMEM_REGION_TXAKSD].srp_r_obj_cnt;
2240 skmem_region_params_config(&srp[SKMEM_REGION_TXAUSD]);
2241
2242 /*
2243 * Since the rx/free slots share the same region and cache,
2244 * we will use the same object size for both types of slots.
2245 */
2246 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size =
2247 MAX(rx_slots, free_slots) * SLOT_DESC_SZ;
2248 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt = rx_rings + free_rings;
2249 skmem_region_params_config(&srp[SKMEM_REGION_RXFKSD]);
2250
2251 /* USD and KSD objects share the same size and count */
2252 srp[SKMEM_REGION_RXFUSD].srp_r_obj_size =
2253 srp[SKMEM_REGION_RXFKSD].srp_r_obj_size;
2254 srp[SKMEM_REGION_RXFUSD].srp_r_obj_cnt =
2255 srp[SKMEM_REGION_RXFKSD].srp_r_obj_cnt;
2256 skmem_region_params_config(&srp[SKMEM_REGION_RXFUSD]);
2257
2258 /* update these based on the adjusted/configured values */
2259 nxp->nxp_meta_size = srp[SKMEM_REGION_KMD].srp_c_obj_size;
2260 nxp->nxp_stats_size = stats_size;
2261 nxp->nxp_flowadv_max = flowadv_max;
2262 nxp->nxp_nexusadv_size = nexusadv_size;
2263 nxp->nxp_capabilities = capabs;
2264
2265 error:
2266 if (overflowline) {
2267 err = EOVERFLOW;
2268 SK_ERR("math overflow in %s on line %d",
2269 __func__, overflowline);
2270 }
2271 return err;
2272 }
2273
2274 #undef ADD
2275 #undef MUL
2276 #undef NXPROV_PARAMS_ADJUST
2277
2278 static void
nxprov_detaching_enqueue(struct kern_nexus_domain_provider * nxdom_prov)2279 nxprov_detaching_enqueue(struct kern_nexus_domain_provider *nxdom_prov)
2280 {
2281 SK_LOCK_ASSERT_HELD();
2282
2283 ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
2284 NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
2285
2286 ++nxprov_detaching_cnt;
2287 ASSERT(nxprov_detaching_cnt != 0);
2288 /*
2289 * Insert this to the detaching list; caller is expected to
2290 * have held a reference, most likely the same one that was
2291 * used for the per-domain provider list.
2292 */
2293 STAILQ_INSERT_TAIL(&nxprov_detaching_head, nxdom_prov,
2294 nxdom_prov_detaching_link);
2295 wakeup((caddr_t)&nxprov_detach_wchan);
2296 }
2297
2298 static struct kern_nexus_domain_provider *
nxprov_detaching_dequeue(void)2299 nxprov_detaching_dequeue(void)
2300 {
2301 struct kern_nexus_domain_provider *nxdom_prov;
2302
2303 SK_LOCK_ASSERT_HELD();
2304
2305 nxdom_prov = STAILQ_FIRST(&nxprov_detaching_head);
2306 ASSERT(nxprov_detaching_cnt != 0 || nxdom_prov == NULL);
2307 if (nxdom_prov != NULL) {
2308 ASSERT((nxdom_prov->nxdom_prov_flags & (NXDOMPROVF_ATTACHED |
2309 NXDOMPROVF_DETACHING)) == NXDOMPROVF_DETACHING);
2310 ASSERT(nxprov_detaching_cnt != 0);
2311 --nxprov_detaching_cnt;
2312 STAILQ_REMOVE(&nxprov_detaching_head, nxdom_prov,
2313 kern_nexus_domain_provider, nxdom_prov_detaching_link);
2314 }
2315 return nxdom_prov;
2316 }
2317
/*
 * Body of the detacher kernel thread: block on the detach wait channel
 * with nxprov_detacher_cont as the continuation.  The continuation takes
 * over the thread and loops forever, so control should never come back
 * here; if it does, something is badly wrong and we panic via VERIFY.
 */
__attribute__((noreturn))
static void
nxprov_detacher(void *v, wait_result_t w)
{
#pragma unused(v, w)
	SK_LOCK();
	(void) msleep0(&nxprov_detach_wchan, &sk_lock, (PZERO - 1),
	    __func__, 0, nxprov_detacher_cont);
	/*
	 * msleep0() shouldn't have returned as PCATCH was not set;
	 * therefore assert in this case.
	 */
	SK_UNLOCK();
	VERIFY(0);
	/* NOTREACHED */
	__builtin_unreachable();
}
2335
/*
 * Continuation routine for the detacher thread.  Runs with sk_lock
 * held and never returns: it drains the detaching list, performing the
 * final teardown on each dequeued domain provider, and when the list
 * is empty it re-enters msleep0() with itself as the continuation
 * (msleep0 with a continuation does not return to the caller, hence
 * the NOTREACHED annotation inside the wait loop).
 */
static int
nxprov_detacher_cont(int err)
{
#pragma unused(err)
	struct kern_nexus_domain_provider *nxdom_prov;

	for (;;) {
		SK_LOCK_ASSERT_HELD();
		/* sleep until nxprov_detaching_enqueue() posts work */
		while (nxprov_detaching_cnt == 0) {
			(void) msleep0(&nxprov_detach_wchan, &sk_lock,
			    (PZERO - 1), __func__, 0, nxprov_detacher_cont);
			/* NOTREACHED */
		}

		/* a nonzero count implies a non-empty list */
		ASSERT(STAILQ_FIRST(&nxprov_detaching_head) != NULL);

		nxdom_prov = nxprov_detaching_dequeue();
		if (nxdom_prov != NULL) {
			nxdom_del_provider_final(nxdom_prov);
		}
	}
}
2358