1 /*
2 * Copyright (c) 2015-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri.
31 * All rights reserved.
32 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 */
55
56 #ifndef _SKYWALK_NEXUS_NEXUSVAR_H_
57 #define _SKYWALK_NEXUS_NEXUSVAR_H_
58
59 #ifdef BSD_KERNEL_PRIVATE
60 #include <skywalk/core/skywalk_var.h>
61 #include <skywalk/os_nexus_private.h>
62
63 struct chreq;
64 struct nxdom;
65 struct kern_channel;
66 struct kern_nexus_domain_provider;
67
68 /*
69 * Nexus controller instance.
70 */
71 struct nxctl {
72 decl_lck_mtx_data(, nxctl_lock);
73 uint32_t nxctl_refcnt;
74 uint32_t nxctl_flags;
75 uuid_t nxctl_uuid;
76 uuid_t nxctl_proc_uuid;
77 uint64_t nxctl_proc_uniqueid;
78 STAILQ_ENTRY(nxctl) nxctl_link;
79 struct fileproc *nxctl_fp;
80 kauth_cred_t nxctl_cred;
81 void *nxctl_traffic_rule_storage;
82 };
83
84 #define NEXUSCTLF_ATTACHED 0x1
85 #define NEXUSCTLF_NOFDREF 0x2
86 #define NEXUSCTLF_KERNEL 0x4
87
88 #define NEXUSCTLF_BITS \
89 "\020\01ATTACHED\02NOFDREF\03KERNEL"
90
91 /*
92 * Nexus port binding structure.
93 */
94 struct nxbind {
95 uint32_t nxb_flags;
96 pid_t nxb_pid;
97 uint64_t nxb_uniqueid;
98 uuid_t nxb_exec_uuid;
99 uint32_t nxb_key_len;
100 void *nxb_key;
101 };
102
103 #define NXBF_MATCH_UNIQUEID 0x1 /* match against process's unique ID */
104 #define NXBF_MATCH_EXEC_UUID 0x2 /* match against executable's UUID */
105 #define NXBF_MATCH_KEY 0x4 /* match against key blob */
106
107 #define NXBF_BITS \
108 "\020\01UNIQUEID\02EXEC_UUID\03KEY"
109
110 /*
111 * Nexus port info structure.
112 */
113 struct nx_port_info {
114 /*
115 * We need to store some states on the nexus port info,
116 * e.g. defunct. The states are encoded in the tagged
117 * pointer handle npi_nah.
118 */
119 uintptr_t npi_nah;
120 struct nxbind *npi_nxb;
121 void *npi_info;
122 };
123
124 /*
125 * Used for indicating what type is attached to npi_info
126 * The type enum is defined here. One namespace for all nexus types.
127 * The actual structure is defined in nexus specific headers.
128 */
129 typedef enum {
130 NX_PORT_INFO_TYPE_NETIF = 0x10000001
131 } nx_port_info_type_t;
132
133 /*
134 * Header of nexus specific structure npi_info
135 */
136 struct nx_port_info_header {
137 nx_port_info_type_t ih_type;
138 size_t ih_size;
139 };
140
141 #define NX_PORT_CHUNK 64
142 #define NX_PORT_CHUNK_FREE 0xffffffffffffffff /* entire chunk is free */
143
144 /*
145 * Nexus port state type.
146 *
147 * Be mindful that due to the use of tagger pointer for nexus adapter in the
148 * nexus port info structure, this type gets encoded with the requirement
149 * that the object addresses are aligned on 4-bytes boundary at the minimum.
150 * That leaves 2 bits for the states, therefore limiting the maximum enum
151 * value to 3.
152 */
153 typedef enum {
154 NEXUS_PORT_STATE_WORKING = 0, /* fully operational */
155 NEXUS_PORT_STATE_DEFUNCT, /* no longer in service */
156 NEXUS_PORT_STATE_RESERVED_1, /* for future use */
157 NEXUS_PORT_STATE_RESERVED_2, /* for future use */
158 NEXUS_PORT_STATE_MAX = NEXUS_PORT_STATE_RESERVED_2
159 } nexus_port_state_t;
160
/*
 * Accessors for the tagged pointer handle npi_nah: the low 2 bits
 * carry a nexus_port_state_t value, the remaining bits carry the
 * nexus_adapter address (which must be at least 4-byte aligned;
 * see the comment above nexus_port_state_t).
 */
#define NPI_NA_STATE_MASK       ((uintptr_t)0x3)        /* 11 */
#define NPI_NA_TAG_MASK         ((uintptr_t)0x3)        /* 11 */

/* tag (low) bits of a handle */
#define NPI_NA_TAG(_p)          ((uintptr_t)(_p) & NPI_NA_TAG_MASK)
#define NPI_NA_ADDR_MASK        (~NPI_NA_TAG_MASK)

/* extract/encode the state bits */
#define NPI_NA_STATE(_p)        ((uintptr_t)(_p) & NPI_NA_STATE_MASK)
#define NPI_NA_STATE_ENC(_s)    ((uintptr_t)(_s) & NPI_NA_STATE_MASK)

/* extract/encode the adapter address bits */
#define NPI_NA_ADDR(_p)         ((uintptr_t)(_p) & NPI_NA_ADDR_MASK)
#define NPI_NA_ADDR_ENC(_p)     ((uintptr_t)(_p) & NPI_NA_ADDR_MASK)

/* combine an adapter address and a state into one handle */
#define NPI_NA_ENCODE(_p, _s)   (NPI_NA_ADDR_ENC(_p) | NPI_NA_STATE_ENC(_s))

/* adapter pointer stored in a port info's handle */
#define NPI_NA(_npi)            \
	((struct nexus_adapter *)NPI_NA_ADDR((_npi)->npi_nah))
/* true if the port has been marked defunct */
#define NPI_IS_DEFUNCT(_npi)    \
	(NPI_NA_STATE((_npi)->npi_nah) == NEXUS_PORT_STATE_DEFUNCT)
179
180 /*
181 * Nexus-wide advisory region and object.
182 */
183 struct kern_nexus_advisory {
184 struct skmem_region *nxv_reg;
185 void *nxv_adv;
186 nexus_advisory_type_t nxv_adv_type;
187 union {
188 struct sk_nexusadv *flowswitch_nxv_adv;
189 struct netif_nexus_advisory *netif_nxv_adv;
190 };
191 };
192
193 /*
194 * Nexus instance.
195 *
196 * At present most fields are protected by sk_lock. The exception is
197 * the nx_ch_if_adv_head list which uses nx_ch_if_adv_lock instead.
198 *
199 * In cases where sk_lock, nx_ch_if_adv_lock and ch_lock must be held,
200 * the following ordering needs to be followed:
201 *
202 * sk_lock -> nx_ch_if_adv_lock -> ch_lock
203 */
204 struct kern_nexus {
205 uint32_t nx_refcnt;
206 volatile uint32_t nx_flags;
207 void *nx_ctx;
208 nexus_ctx_release_fn_t nx_ctx_release;
209 struct kern_nexus_provider *nx_prov;
210 uint64_t nx_id;
211 uuid_t nx_uuid;
212 STAILQ_ENTRY(kern_nexus) nx_prov_link;
213 RB_ENTRY(kern_nexus) nx_link;
214 STAILQ_HEAD(, kern_channel) nx_ch_head;
215 uint32_t nx_ch_count;
216 STAILQ_HEAD(, kern_channel) nx_ch_nonxref_head;
217 decl_lck_rw_data(, nx_ch_if_adv_lock);
218 STAILQ_HEAD(, kern_channel) nx_ch_if_adv_head;
219 void *nx_arg;
220 struct kern_pbufpool *nx_rx_pp;
221 struct kern_pbufpool *nx_tx_pp;
222 struct kern_nexus_advisory nx_adv;
223
224 /* nexus port */
225 struct nx_port_info *nx_ports;
226 bitmap_t *nx_ports_bmap;
227 nexus_port_size_t nx_active_ports;
228 nexus_port_size_t nx_num_ports;
229 };
230
231 #define NXF_ATTACHED 0x1
232 #define NXF_CLOSED 0x2 /* attached but closed */
233 #define NXF_REJECT (1U << 31) /* not accepting channel activities */
234
235 #define NXF_BITS \
236 "\020\01ATTACHED\02CLOSED\040REJECT"
237
238 #define NX_PROV(_nx) ((_nx)->nx_prov)
239 #define NX_DOM_PROV(_nx) (NX_PROV(_nx)->nxprov_dom_prov)
240 #define NX_DOM(_nx) (NX_DOM_PROV(_nx)->nxdom_prov_dom)
241
242 #define NX_REJECT_ACT(_nx) (((_nx)->nx_flags & NXF_REJECT) != 0)
243
244 /*
245 * Nexus provider.
246 */
247 struct kern_nexus_provider {
248 uint32_t nxprov_refcnt;
249 uint32_t nxprov_flags;
250 STAILQ_ENTRY(kern_nexus_provider) nxprov_link;
251 STAILQ_HEAD(, kern_nexus) nxprov_nx_head;
252 uint32_t nxprov_nx_count;
253 struct nxctl *nxprov_ctl;
254 uuid_t nxprov_uuid;
255 struct kern_nexus_domain_provider *nxprov_dom_prov;
256 union {
257 struct kern_nexus_provider_init nxprov_ext;
258 struct kern_nexus_netif_provider_init nxprov_netif_ext;
259 };
260 struct nxprov_params *nxprov_params;
261 struct skmem_region_params nxprov_region_params[SKMEM_REGIONS];
262 };
263
264 /* valid flags for nxprov_flags */
265 #define NXPROVF_ATTACHED 0x1 /* attached to global list */
266 #define NXPROVF_CLOSED 0x2 /* attached but closed */
267 #define NXPROVF_EXTERNAL 0x4 /* external nexus provider */
268 #define NXPROVF_VIRTUAL_DEVICE 0x8 /* device is virtual (no DMA) */
269
270 #define NXPROV_LLINK(_nxp) \
271 ((_nxp)->nxprov_params->nxp_flags & NXPF_NETIF_LLINK)
272
273 #define NXPROVF_BITS \
274 "\020\01ATTACHED\02CLOSED\03EXTERNAL\04VIRTUALDEV"
275
276 #define NX_ANONYMOUS_PROV(_nx) \
277 (NX_PROV(_nx)->nxprov_params->nxp_flags & NXPF_ANONYMOUS)
278 #define NX_USER_CHANNEL_PROV(_nx) \
279 (NX_PROV(_nx)->nxprov_params->nxp_flags & NXPF_USER_CHANNEL)
280 #define NX_LLINK_PROV(_nx) NXPROV_LLINK(NX_PROV(_nx))
281
282 /*
283 * Nexus domain provider.
284 */
285 struct kern_nexus_domain_provider {
286 STAILQ_ENTRY(kern_nexus_domain_provider) nxdom_prov_link;
287 STAILQ_ENTRY(kern_nexus_domain_provider) nxdom_prov_detaching_link;
288 char nxdom_prov_name[64];
289 uuid_t nxdom_prov_uuid;
290 uint64_t nxdom_prov_gencnt;
291 uint32_t nxdom_prov_refcnt;
292 uint32_t nxdom_prov_flags;
293 struct nxdom *nxdom_prov_dom;
294 struct kern_nexus_domain_provider_init nxdom_prov_ext;
295 /*
296 * The callbacks are grouped together to simplify the
297 * initialization of external domain providers; see
298 * kern_nexus_register_domain_provider() for details.
299 */
300 struct nxdom_prov_cb {
301 int (*dp_cb_init)(struct kern_nexus_domain_provider *);
302 void (*dp_cb_fini)(struct kern_nexus_domain_provider *);
303 int (*dp_cb_params)(struct kern_nexus_domain_provider *,
304 const uint32_t, const struct nxprov_params *,
305 struct nxprov_params *,
306 struct skmem_region_params[SKMEM_REGIONS], uint32_t);
307 int (*dp_cb_mem_new)(struct kern_nexus_domain_provider *,
308 struct kern_nexus *, struct nexus_adapter *);
309 int (*dp_cb_config)(struct kern_nexus_domain_provider *,
310 struct kern_nexus *, struct nx_cfg_req *, int,
311 struct proc *, kauth_cred_t);
312 int (*dp_cb_nx_ctor)(struct kern_nexus *);
313 void (*dp_cb_nx_dtor)(struct kern_nexus *);
314 int (*dp_cb_nx_mem_info)(struct kern_nexus *,
315 struct kern_pbufpool **, struct kern_pbufpool **);
316 size_t (*dp_cb_nx_mib_get)(struct kern_nexus *,
317 struct nexus_mib_filter *, void *, size_t, struct proc *);
318 int (*dp_cb_nx_stop)(struct kern_nexus *);
319 } nxdom_prov_cb;
320 #define nxdom_prov_init nxdom_prov_cb.dp_cb_init
321 #define nxdom_prov_fini nxdom_prov_cb.dp_cb_fini
322 #define nxdom_prov_params nxdom_prov_cb.dp_cb_params
323 #define nxdom_prov_mem_new nxdom_prov_cb.dp_cb_mem_new
324 #define nxdom_prov_config nxdom_prov_cb.dp_cb_config
325 #define nxdom_prov_nx_ctor nxdom_prov_cb.dp_cb_nx_ctor
326 #define nxdom_prov_nx_dtor nxdom_prov_cb.dp_cb_nx_dtor
327 #define nxdom_prov_nx_mem_info nxdom_prov_cb.dp_cb_nx_mem_info
328 #define nxdom_prov_nx_mib_get nxdom_prov_cb.dp_cb_nx_mib_get
329 #define nxdom_prov_nx_stop nxdom_prov_cb.dp_cb_nx_stop
330 };
331
332 #define NXDOMPROVF_INITIALIZED 0x1 /* provider has been initialized */
333 #define NXDOMPROVF_ATTACHED 0x2 /* provider is attached to a domain */
334 #define NXDOMPROVF_DETACHING 0x4 /* provider is being detached */
335 #define NXDOMPROVF_EXT 0x8 /* external provider */
336 #define NXDOMPROVF_EXT_INITED 0x10 /* nxpi_init() succeeded */
337 #define NXDOMPROVF_DEFAULT 0x20 /* default provider for domain */
338
/*
 * Default/minimum/maximum bounds for one nexus provider parameter;
 * accessed via the NXDOM_DEF/NXDOM_MIN/NXDOM_MAX macros below.
 */
struct nxp_bounds {
	uint32_t nb_def;        /* default value */
	uint32_t nb_min;        /* minimum allowed value */
	uint32_t nb_max;        /* maximum allowed value */
};
344
345 /*
346 * Nexus domain.
347 *
348 * Each Nexus type is represented by a Nexus domain; there can
349 * be more than one providers for a given domain.
350 */
351 struct nxdom {
352 STAILQ_ENTRY(nxdom) nxdom_link;
353 STAILQ_HEAD(, kern_nexus_domain_provider) nxdom_prov_head;
354 nexus_type_t nxdom_type;
355 nexus_meta_type_t nxdom_md_type;
356 nexus_meta_subtype_t nxdom_md_subtype;
357 uint32_t nxdom_flags;
358 struct nxp_bounds nxdom_ports;
359 struct nxp_bounds nxdom_tx_rings;
360 struct nxp_bounds nxdom_rx_rings;
361 struct nxp_bounds nxdom_tx_slots;
362 struct nxp_bounds nxdom_rx_slots;
363 struct nxp_bounds nxdom_buf_size;
364 struct nxp_bounds nxdom_large_buf_size;
365 struct nxp_bounds nxdom_meta_size;
366 struct nxp_bounds nxdom_stats_size;
367 struct nxp_bounds nxdom_pipes;
368 struct nxp_bounds nxdom_extensions;
369 struct nxp_bounds nxdom_mhints;
370 struct nxp_bounds nxdom_flowadv_max;
371 struct nxp_bounds nxdom_nexusadv_size;
372 struct nxp_bounds nxdom_capabilities;
373 struct nxp_bounds nxdom_qmap;
374 struct nxp_bounds nxdom_max_frags;
375 struct skmem_region_params nxdom_region_params[SKMEM_REGIONS];
376 const char *nxdom_name;
377
378 /*
379 * Nexus domain callbacks.
380 */
381 void (*nxdom_init)(struct nxdom *); /* optional */
382 void (*nxdom_terminate)(struct nxdom *); /* optional */
383 void (*nxdom_fini)(struct nxdom *); /* optional */
384 int (*nxdom_find_port) /* optional */
385 (struct kern_nexus *, boolean_t, nexus_port_t *);
386 boolean_t (*nxdom_port_is_reserved) /* optional */
387 (struct kern_nexus *, nexus_port_t);
388 int (*nxdom_bind_port) /* required */
389 (struct kern_nexus *, nexus_port_t *, struct nxbind *, void *);
390 int (*nxdom_unbind_port) /* required */
391 (struct kern_nexus *, nexus_port_t);
392 int (*nxdom_connect) /* required */
393 (struct kern_nexus_domain_provider *, struct kern_nexus *,
394 struct kern_channel *, struct chreq *, struct kern_channel *,
395 struct nxbind *, struct proc *);
396 void (*nxdom_disconnect) /* required */
397 (struct kern_nexus_domain_provider *, struct kern_nexus *,
398 struct kern_channel *);
399 void (*nxdom_defunct) /* required */
400 (struct kern_nexus_domain_provider *, struct kern_nexus *,
401 struct kern_channel *, struct proc *);
402 void (*nxdom_defunct_finalize) /* required */
403 (struct kern_nexus_domain_provider *, struct kern_nexus *,
404 struct kern_channel *, boolean_t);
405 };
406
407 #define NEXUSDOMF_INITIALIZED 0x1 /* domain has been initialized */
408 #define NEXUSDOMF_ATTACHED 0x2 /* domain is globally attached */
409 #define NEXUSDOMF_TERMINATED 0x4 /* domain has been terminated */
410
411 #define NXDOM_DEF(_dom, var) ((_dom)->nxdom_##var.nb_def)
412 #define NXDOM_MIN(_dom, var) ((_dom)->nxdom_##var.nb_min)
413 #define NXDOM_MAX(_dom, var) ((_dom)->nxdom_##var.nb_max)
414
415 extern struct nexus_controller kernnxctl;
416 extern lck_grp_t nexus_lock_group;
417 extern lck_grp_t nexus_mbq_lock_group;
418 extern lck_grp_t nexus_pktq_lock_group;
419 extern lck_attr_t nexus_lock_attr;
420 extern kern_allocation_name_t skmem_tag_nx_key;
421 extern kern_allocation_name_t skmem_tag_nx_port_info;
422
423 extern struct kern_nexus_domain_provider *nxdom_prov_default[NEXUS_TYPE_MAX];
424
/* true for the shared kernel-owned nxctl instance */
#define NX_SHARED_NXCTL_INSTANCE(_nxctl) \
	((_nxctl) == kernnxctl.ncd_nxctl)

/*
 * Acquire the nxctl mutex.  The shared kernel instance is never
 * locked here; for it we only assert that the mutex is not owned.
 */
#define NXCTL_LOCK(_nxctl) do {                                 \
	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {              \
	        lck_mtx_lock(&((_nxctl)->nxctl_lock));          \
	} else {                                                \
	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),         \
	            LCK_MTX_ASSERT_NOTOWNED);                   \
	}                                                       \
} while (0)

/*
 * Release the nxctl mutex (no-op for the shared kernel instance),
 * then assert the mutex is no longer owned.
 */
#define NXCTL_UNLOCK(_nxctl) do {                               \
	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {              \
	        lck_mtx_unlock(&((_nxctl)->nxctl_lock));        \
	}                                                       \
	LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),                 \
	    LCK_MTX_ASSERT_NOTOWNED);                           \
} while (0)

/*
 * Assert that the nxctl mutex is held — except for the shared
 * kernel instance, which is never locked by NXCTL_LOCK.
 */
#define NXCTL_LOCK_ASSERT_HELD(_nxctl) do {                     \
	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {              \
	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),         \
	            LCK_MTX_ASSERT_OWNED);                      \
	} else {                                                \
	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),         \
	            LCK_MTX_ASSERT_NOTOWNED);                   \
	}                                                       \
} while (0)
454
455 __BEGIN_DECLS
456 extern int nexus_init(void);
457 extern void nexus_fini(void);
458
459 extern struct kern_nexus *nx_create(struct nxctl *, const uuid_t,
460 const nexus_type_t, const void *, nexus_ctx_release_fn_t,
461 struct kern_pbufpool *, struct kern_pbufpool *, int *);
462 extern void nx_retain(struct kern_nexus *);
463 extern void nx_retain_locked(struct kern_nexus *);
464 extern int nx_release(struct kern_nexus *);
465 extern int nx_release_locked(struct kern_nexus *);
466 extern void nx_detach(struct kern_nexus *);
467 extern void nx_stop(struct kern_nexus *nx);
468 extern int nx_close(struct kern_nexus *, boolean_t);
469 extern int nx_destroy(struct nxctl *, const uuid_t);
470 extern struct kern_nexus *nx_find(const uuid_t, boolean_t);
471 extern int nx_advisory_alloc(struct kern_nexus *, const char *,
472 struct skmem_region_params *, nexus_advisory_type_t);
473 extern void nx_advisory_free(struct kern_nexus *);
474 extern int nx_port_find(struct kern_nexus *, nexus_port_t,
475 nexus_port_t, nexus_port_t *);
476 extern int nx_port_alloc(struct kern_nexus *, nexus_port_t,
477 struct nxbind *, struct nexus_adapter **, struct proc *);
478 extern int nx_port_bind(struct kern_nexus *, nexus_port_t,
479 struct nxbind *);
480 extern int nx_port_bind_info(struct kern_nexus *, nexus_port_t,
481 struct nxbind *, void *);
482 extern int nx_port_unbind(struct kern_nexus *, nexus_port_t);
483 extern struct nexus_adapter *nx_port_get_na(struct kern_nexus *,
484 nexus_port_t);
485 extern int nx_port_get_info(struct kern_nexus *, nexus_port_t,
486 nx_port_info_type_t, void *, uint32_t);
487 extern void nx_port_defunct(struct kern_nexus *, nexus_port_t);
488 extern void nx_port_free(struct kern_nexus *, nexus_port_t);
489 extern void nx_port_free_all(struct kern_nexus *);
490 extern bool nx_port_is_valid(struct kern_nexus *, nexus_port_t);
491 extern bool nx_port_is_defunct(struct kern_nexus *, nexus_port_t);
492 extern void nx_port_foreach(struct kern_nexus *, void (^)(nexus_port_t));
493 extern void nx_interface_advisory_notify(struct kern_nexus *);
494
495 extern struct nxctl *nxctl_create(struct proc *, struct fileproc *,
496 const uuid_t, int *);
497 extern void nxctl_close(struct nxctl *);
498 extern void nxctl_traffic_rule_clean(struct nxctl *);
499 extern void nxctl_traffic_rule_init(void);
500 extern void nxctl_traffic_rule_fini(void);
501 extern int nxctl_inet_traffic_rule_find_qset_id_with_pkt(const char *,
502 struct __kern_packet *, uint64_t *);
503 extern int nxctl_inet_traffic_rule_find_qset_id(const char *,
504 struct ifnet_traffic_descriptor_inet *, uint64_t *);
505 extern int nxctl_inet_traffic_rule_get_count(const char *, uint32_t *);
506 extern int nxctl_get_opt(struct nxctl *, struct sockopt *);
507 extern int nxctl_set_opt(struct nxctl *, struct sockopt *);
508 extern void nxctl_retain(struct nxctl *);
509 extern int nxctl_release(struct nxctl *);
510 extern void nxctl_dtor(void *);
511
512 extern int nxprov_advise_connect(struct kern_nexus *, struct kern_channel *,
513 struct proc *p);
514 extern void nxprov_advise_disconnect(struct kern_nexus *,
515 struct kern_channel *);
516 extern struct kern_nexus_provider *nxprov_create(struct proc *,
517 struct nxctl *, struct nxprov_reg *, int *);
518 extern struct kern_nexus_provider *nxprov_create_kern(struct nxctl *,
519 struct kern_nexus_domain_provider *, struct nxprov_reg *,
520 const struct kern_nexus_provider_init *init, int *err);
521 extern int nxprov_close(struct kern_nexus_provider *, boolean_t);
522 extern int nxprov_destroy(struct nxctl *, const uuid_t);
523 extern void nxprov_retain(struct kern_nexus_provider *);
524 extern int nxprov_release(struct kern_nexus_provider *);
525 extern struct nxprov_params *nxprov_params_alloc(zalloc_flags_t);
526 extern void nxprov_params_free(struct nxprov_params *);
527
/*
 * Set of pointers through which a domain provider's adjust callback
 * (the adjust_fn argument of nxprov_params_adjust()) may tweak the
 * effective nexus provider parameters in place.
 */
struct nxprov_adjusted_params {
	nexus_meta_subtype_t *adj_md_subtype;   /* metadata subtype */
	uint32_t *adj_stats_size;               /* statistics region size */
	uint32_t *adj_flowadv_max;              /* max flow advisory entries */
	uint32_t *adj_nexusadv_size;            /* nexus advisory size */
	uint32_t *adj_caps;                     /* capabilities */
	uint32_t *adj_tx_rings;                 /* TX ring count */
	uint32_t *adj_rx_rings;                 /* RX ring count */
	uint32_t *adj_tx_slots;                 /* TX slots per ring */
	uint32_t *adj_rx_slots;                 /* RX slots per ring */
	uint32_t *adj_alloc_rings;              /* alloc ring count */
	uint32_t *adj_free_rings;               /* free ring count */
	uint32_t *adj_alloc_slots;              /* alloc slots per ring */
	uint32_t *adj_free_slots;               /* free slots per ring */
	uint32_t *adj_buf_size;                 /* buffer size */
	uint32_t *adj_buf_region_segment_size;  /* buffer region segment size */
	uint32_t *adj_pp_region_config_flags;   /* packet pool region config flags */
	uint32_t *adj_max_frags;                /* max buflet fragments — presumably; verify */
	uint32_t *adj_event_rings;              /* event ring count */
	uint32_t *adj_event_slots;              /* event slots per ring */
	uint32_t *adj_max_buffers;              /* max buffers */
	uint32_t *adj_large_buf_size;           /* large buffer size */
};
551
552 extern int nxprov_params_adjust(struct kern_nexus_domain_provider *,
553 const uint32_t, const struct nxprov_params *, struct nxprov_params *,
554 struct skmem_region_params[SKMEM_REGIONS], const struct nxdom *,
555 const struct nxdom *, const struct nxdom *, uint32_t,
556 int (*adjust_fn)(const struct kern_nexus_domain_provider *,
557 const struct nxprov_params *, struct nxprov_adjusted_params *));
558
559 extern void nxdom_attach_all(void);
560 extern void nxdom_detach_all(void);
561 extern struct nxdom *nxdom_find(nexus_type_t);
562
563 extern struct kern_nexus_domain_provider *nxdom_prov_find(
564 const struct nxdom *, const char *);
565 extern struct kern_nexus_domain_provider *nxdom_prov_find_uuid(const uuid_t);
566 extern int nxdom_prov_add(struct nxdom *, struct kern_nexus_domain_provider *);
567 extern void nxdom_prov_del(struct kern_nexus_domain_provider *);
568 extern void nxdom_prov_retain_locked(struct kern_nexus_domain_provider *);
569 extern void nxdom_prov_retain(struct kern_nexus_domain_provider *);
570 extern boolean_t nxdom_prov_release_locked(struct kern_nexus_domain_provider *);
571 extern boolean_t nxdom_prov_release(struct kern_nexus_domain_provider *);
572 extern int nxdom_prov_validate_params(struct kern_nexus_domain_provider *,
573 const struct nxprov_reg *, struct nxprov_params *,
574 struct skmem_region_params[SKMEM_REGIONS], const uint32_t, uint32_t);
575
576 extern struct nxbind *nxb_alloc(zalloc_flags_t);
577 extern void nxb_free(struct nxbind *);
578 extern boolean_t nxb_is_equal(struct nxbind *, struct nxbind *);
579 extern void nxb_move(struct nxbind *, struct nxbind *);
580
581 typedef void kern_nexus_walktree_f_t(struct kern_nexus *, void *);
582 extern void kern_nexus_walktree(kern_nexus_walktree_f_t *, void *, boolean_t);
583
584 extern int kern_nexus_get_pbufpool_info(const uuid_t nx_uuid,
585 struct kern_pbufpool_memory_info *rx_pool,
586 struct kern_pbufpool_memory_info *tx_pool);
587 __END_DECLS
588
589 #include <skywalk/nexus/nexus_adapter.h>
590
591 __attribute__((always_inline))
592 static inline int
nx_sync_tx(struct __kern_channel_ring * kring,boolean_t commit)593 nx_sync_tx(struct __kern_channel_ring *kring, boolean_t commit)
594 {
595 struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
596
597 ASSERT(kring->ckr_tx == NR_TX);
598 if (nxprov->nxprov_ext.nxpi_sync_tx != NULL) {
599 return nxprov->nxprov_ext.nxpi_sync_tx(nxprov,
600 KRNA(kring)->na_nx, kring,
601 (commit ? KERN_NEXUS_SYNCF_COMMIT : 0));
602 } else {
603 return 0;
604 }
605 }
606
607 __attribute__((always_inline))
608 static inline int
nx_sync_rx(struct __kern_channel_ring * kring,boolean_t commit)609 nx_sync_rx(struct __kern_channel_ring *kring, boolean_t commit)
610 {
611 struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
612
613 ASSERT(kring->ckr_tx == NR_RX);
614 if (nxprov->nxprov_ext.nxpi_sync_rx != NULL) {
615 return nxprov->nxprov_ext.nxpi_sync_rx(nxprov,
616 KRNA(kring)->na_nx, kring,
617 (commit ? KERN_NEXUS_SYNCF_COMMIT : 0));
618 } else {
619 return 0;
620 }
621 }
622
623 __attribute__((always_inline))
624 static __inline__ void
nx_tx_doorbell(struct __kern_channel_ring * kring,boolean_t async)625 nx_tx_doorbell(struct __kern_channel_ring *kring, boolean_t async)
626 {
627 struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
628
629 ASSERT(kring->ckr_tx == NR_TX);
630 ASSERT(nxprov->nxprov_ext.nxpi_tx_doorbell != NULL);
631 nxprov->nxprov_ext.nxpi_tx_doorbell(nxprov, KRNA(kring)->na_nx,
632 kring, (async ? KERN_NEXUS_TXDOORBELLF_ASYNC_REFILL: 0));
633 }
634
635 __attribute__((always_inline))
636 static inline int
nx_rx_sync_packets(struct __kern_channel_ring * kring,uint64_t packets[],uint32_t * count)637 nx_rx_sync_packets(struct __kern_channel_ring *kring, uint64_t packets[],
638 uint32_t *count)
639 {
640 struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
641
642 ASSERT(kring->ckr_tx == NR_RX);
643 if (nxprov->nxprov_ext.nxpi_rx_sync_packets != NULL) {
644 return nxprov->nxprov_ext.nxpi_rx_sync_packets(nxprov,
645 KRNA(kring)->na_nx, kring, packets, count, 0);
646 } else {
647 return 0;
648 }
649 }
650
651 __attribute__((always_inline))
652 static inline boolean_t
nx_has_rx_sync_packets(struct __kern_channel_ring * kring)653 nx_has_rx_sync_packets(struct __kern_channel_ring *kring)
654 {
655 struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
656
657 ASSERT(kring->ckr_tx == NR_RX);
658 return nxprov->nxprov_ext.nxpi_rx_sync_packets != NULL;
659 }
660
661 __attribute__((always_inline))
662 static __inline__ errno_t
nx_tx_qset_notify(struct kern_nexus * nx,void * qset_ctx)663 nx_tx_qset_notify(struct kern_nexus *nx, void *qset_ctx)
664 {
665 struct kern_nexus_provider *nxprov = NX_PROV(nx);
666 sk_protect_t protect;
667 errno_t err;
668
669 ASSERT(nxprov->nxprov_netif_ext.nxnpi_tx_qset_notify != NULL);
670 protect = sk_tx_notify_protect();
671 err = nxprov->nxprov_netif_ext.nxnpi_tx_qset_notify(nxprov, nx,
672 qset_ctx, 0);
673 sk_tx_notify_unprotect(protect);
674 return err;
675 }
676 #endif /* BSD_KERNEL_PRIVATE */
677 #endif /* _SKYWALK_NEXUS_NEXUSVAR_H_ */
678