xref: /xnu-11215.61.5/bsd/skywalk/nexus/nexus_var.h (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1 /*
2  * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri.
31  * All rights reserved.
32  * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
33  *
34  * Redistribution and use in source and binary forms, with or without
35  * modification, are permitted provided that the following conditions
36  * are met:
37  *   1. Redistributions of source code must retain the above copyright
38  *      notice, this list of conditions and the following disclaimer.
39  *   2. Redistributions in binary form must reproduce the above copyright
40  *      notice, this list of conditions and the following disclaimer in the
41  *    documentation and/or other materials provided with the distribution.
42  *
43  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53  * SUCH DAMAGE.
54  */
55 
56 #ifndef _SKYWALK_NEXUS_NEXUSVAR_H_
57 #define _SKYWALK_NEXUS_NEXUSVAR_H_
58 
59 #ifdef BSD_KERNEL_PRIVATE
60 #include <skywalk/core/skywalk_var.h>
61 #include <skywalk/os_nexus_private.h>
62 
63 struct chreq;
64 struct nxdom;
65 struct kern_channel;
66 struct kern_nexus_domain_provider;
67 
68 /*
69  * Nexus controller instance.
70  */
71 struct nxctl {
72 	decl_lck_mtx_data(, nxctl_lock);
73 	uint32_t                nxctl_refcnt;
74 	uint32_t                nxctl_flags;
75 	uuid_t                  nxctl_uuid;
76 	uuid_t                  nxctl_proc_uuid;
77 	uint64_t                nxctl_proc_uniqueid;
78 	STAILQ_ENTRY(nxctl)     nxctl_link;
79 	struct fileproc         *nxctl_fp;
80 	kauth_cred_t            nxctl_cred;
81 	/*
82 	 * -fbounds-safety: nxctl_traffic_rule_storage only gets used as of type
83 	 * struct nxctl_traffic_rule_storage *
84 	 */
85 	struct nxctl_traffic_rule_storage *nxctl_traffic_rule_storage;
86 };
87 
88 #define NEXUSCTLF_ATTACHED      0x1
89 #define NEXUSCTLF_NOFDREF       0x2
90 #define NEXUSCTLF_KERNEL        0x4
91 
92 #define NEXUSCTLF_BITS  \
93 	"\020\01ATTACHED\02NOFDREF\03KERNEL"
94 
95 /*
96  * Nexus port binding structure.
97  */
98 struct nxbind {
99 	uint32_t                nxb_flags;
100 	pid_t                   nxb_pid;
101 	uint64_t                nxb_uniqueid;
102 	uuid_t                  nxb_exec_uuid;
103 	uint32_t                nxb_key_len;
104 	void                    *__sized_by(nxb_key_len) nxb_key;
105 };
106 
107 #define NXBF_MATCH_UNIQUEID     0x1     /* match against process's unique ID */
108 #define NXBF_MATCH_EXEC_UUID    0x2     /* match against executable's UUID */
109 #define NXBF_MATCH_KEY          0x4     /* match against key blob */
110 
111 #define NXBF_BITS       \
112 	"\020\01UNIQUEID\02EXEC_UUID\03KEY"
113 
114 /*
115  * Nexus port info structure.
116  */
117 struct nx_port_info {
118 	/*
119 	 * We need to store some states on the nexus port info,
120 	 * e.g. defunct.  The states are encoded in the tagged
121 	 * pointer handle npi_nah.
122 	 */
123 	uintptr_t               npi_nah;
124 	struct nxbind           *npi_nxb;
125 	void                    *npi_info;
126 };
127 
128 /*
129  * Used for indicating what type is attached to npi_info
130  * The type enum is defined here. One namespace for all nexus types.
131  * The actual structure is defined in nexus specific headers.
132  */
133 typedef enum {
134 	NX_PORT_INFO_TYPE_NETIF = 0x10000001
135 } nx_port_info_type_t;
136 
137 /*
138  * Header of nexus specific structure npi_info
139  */
140 struct nx_port_info_header {
141 	nx_port_info_type_t     ih_type;
142 	size_t                  ih_size;
143 };
144 
145 #define NX_PORT_CHUNK      64
146 #define NX_PORT_CHUNK_FREE 0xffffffffffffffff /* entire chunk is free */
147 
148 /*
149  * Nexus port state type.
150  *
151  * Be mindful that due to the use of tagger pointer for nexus adapter in the
152  * nexus port info structure, this type gets encoded with the requirement
153  * that the object addresses are aligned on 4-bytes boundary at the minimum.
154  * That leaves 2 bits for the states, therefore limiting the maximum enum
155  * value to 3.
156  */
157 typedef enum {
158 	NEXUS_PORT_STATE_WORKING = 0,           /* fully operational */
159 	NEXUS_PORT_STATE_DEFUNCT,               /* no longer in service */
160 	NEXUS_PORT_STATE_RESERVED_1,            /* for future use */
161 	NEXUS_PORT_STATE_RESERVED_2,            /* for future use */
162 	NEXUS_PORT_STATE_MAX = NEXUS_PORT_STATE_RESERVED_2
163 } nexus_port_state_t;
164 
165 #define NPI_NA_STATE_MASK       ((uintptr_t)0x3)        /* 11 */
166 #define NPI_NA_TAG_MASK         ((uintptr_t)0x3)        /* 11 */
167 
168 #define NPI_NA_TAG(_p)          ((uintptr_t)(_p) & NPI_NA_TAG_MASK)
169 #define NPI_NA_ADDR_MASK        (~NPI_NA_TAG_MASK)
170 
171 #define NPI_NA_STATE(_p)        ((uintptr_t)(_p) & NPI_NA_STATE_MASK)
172 #define NPI_NA_STATE_ENC(_s)    ((uintptr_t)(_s) & NPI_NA_STATE_MASK)
173 
174 #define NPI_NA_ADDR(_p)         ((uintptr_t)(_p) & NPI_NA_ADDR_MASK)
175 #define NPI_NA_ADDR_ENC(_p)     ((uintptr_t)(_p) & NPI_NA_ADDR_MASK)
176 
177 #define NPI_NA_ENCODE(_p, _s)   (NPI_NA_ADDR_ENC(_p) | NPI_NA_STATE_ENC(_s))
178 
179 #define NPI_NA(_npi)            \
180 	(__unsafe_forge_single(struct nexus_adapter *, NPI_NA_ADDR((_npi)->npi_nah)))
181 #define NPI_IS_DEFUNCT(_npi)    \
182 	(NPI_NA_STATE((_npi)->npi_nah) == NEXUS_PORT_STATE_DEFUNCT)
183 
184 /*
185  * Nexus-wide advisory region and object.
186  */
187 struct kern_nexus_advisory {
188 	struct skmem_region     *nxv_reg;
189 	void                    *__sized_by(nxv_adv_size) nxv_adv;
190 	nexus_advisory_type_t   nxv_adv_type;
191 	union {
192 		struct sk_nexusadv             *flowswitch_nxv_adv;
193 		struct netif_nexus_advisory    *netif_nxv_adv;
194 	};
195 	uint32_t                nxv_adv_size;
196 };
197 
198 /*
199  * Nexus instance.
200  *
201  * At present most fields are protected by sk_lock.  The exception is
202  * the nx_ch_if_adv_head list which uses nx_ch_if_adv_lock instead.
203  *
204  * In cases where sk_lock, nx_ch_if_adv_lock and ch_lock must be held,
205  * the following ordering needs to be followed:
206  *
207  *   sk_lock -> nx_ch_if_adv_lock -> ch_lock
208  */
209 struct kern_nexus {
210 	uint32_t                nx_refcnt;
211 	volatile uint32_t       nx_flags;
212 	void                    *nx_ctx;
213 	nexus_ctx_release_fn_t  nx_ctx_release;
214 	struct kern_nexus_provider *nx_prov;
215 	uint64_t                nx_id;
216 	uuid_t                  nx_uuid;
217 	STAILQ_ENTRY(kern_nexus) nx_prov_link;
218 	RB_ENTRY(kern_nexus)    nx_link;
219 	STAILQ_HEAD(, kern_channel) nx_ch_head;
220 	uint32_t                nx_ch_count;
221 	STAILQ_HEAD(, kern_channel) nx_ch_nonxref_head;
222 	decl_lck_rw_data(, nx_ch_if_adv_lock);
223 	STAILQ_HEAD(, kern_channel) nx_ch_if_adv_head;
224 	void                    *nx_arg;
225 	struct kern_pbufpool    *nx_rx_pp;
226 	struct kern_pbufpool    *nx_tx_pp;
227 	struct kern_nexus_advisory nx_adv;
228 
229 	/* nexus port */
230 	struct nx_port_info     *__counted_by(nx_num_ports) nx_ports;
231 	bitmap_t                *__sized_by(nx_ports_bmap_size) nx_ports_bmap;
232 	nexus_port_size_t       nx_active_ports;
233 	nexus_port_size_t       nx_num_ports;
234 	size_t                  nx_ports_bmap_size;
235 };
236 
/* valid values for nx_flags */
#define NXF_ATTACHED    0x1
#define NXF_CLOSED      0x2             /* attached but closed */
#define NXF_INVALIDATED 0x4             /* no longer allow opens */
#define NXF_REJECT      (1U << 31)      /* not accepting channel activities */

/*
 * Kernel %b-style format string for nx_flags.  Fixed to include
 * \03INVALIDATED, which was previously missing even though
 * NXF_INVALIDATED (0x4, i.e. bit 3) is a defined flag -- without it,
 * formatted flag dumps silently dropped the INVALIDATED state.
 */
#define NXF_BITS        \
	"\020\01ATTACHED\02CLOSED\03INVALIDATED\040REJECT"
244 
/* accessors for walking nexus -> provider -> domain provider -> domain */
#define NX_PROV(_nx)            ((_nx)->nx_prov)
#define NX_PROV_PARAMS(_nx)     (NX_PROV(_nx)->nxprov_params)
#define NX_DOM_PROV(_nx)        (NX_PROV(_nx)->nxprov_dom_prov)
#define NX_DOM(_nx)             (NX_DOM_PROV(_nx)->nxdom_prov_dom)

/* true when the nexus is rejecting channel activities (NXF_REJECT set) */
#define NX_REJECT_ACT(_nx)      (((_nx)->nx_flags & NXF_REJECT) != 0)
251 
252 /*
253  * Nexus provider.
254  */
255 struct kern_nexus_provider {
256 	uint32_t                        nxprov_refcnt;
257 	uint32_t                        nxprov_flags;
258 	STAILQ_ENTRY(kern_nexus_provider) nxprov_link;
259 	STAILQ_HEAD(, kern_nexus)       nxprov_nx_head;
260 	uint32_t                        nxprov_nx_count;
261 	struct nxctl                    *nxprov_ctl;
262 	uuid_t                          nxprov_uuid;
263 	struct kern_nexus_domain_provider *nxprov_dom_prov;
264 	union {
265 		struct kern_nexus_provider_init nxprov_ext;
266 		struct kern_nexus_netif_provider_init nxprov_netif_ext;
267 	};
268 	struct nxprov_params            *nxprov_params;
269 	struct skmem_region_params      nxprov_region_params[SKMEM_REGIONS];
270 };
271 
272 /* valid flags for nxprov_flags */
273 #define NXPROVF_ATTACHED        0x1     /* attached to global list */
274 #define NXPROVF_CLOSED          0x2     /* attached but closed */
275 #define NXPROVF_EXTERNAL        0x4     /* external nexus provider */
276 #define NXPROVF_VIRTUAL_DEVICE  0x8     /* device is virtual (no DMA) */
277 
278 #define NXPROV_LLINK(_nxp) \
279 	((_nxp)->nxprov_params->nxp_flags & NXPF_NETIF_LLINK)
280 
281 #define NXPROVF_BITS    \
282 	"\020\01ATTACHED\02CLOSED\03EXTERNAL\04VIRTUALDEV"
283 
284 #define NX_ANONYMOUS_PROV(_nx)  \
285 	(NX_PROV(_nx)->nxprov_params->nxp_flags & NXPF_ANONYMOUS)
286 #define NX_USER_CHANNEL_PROV(_nx) \
287 	(NX_PROV(_nx)->nxprov_params->nxp_flags & NXPF_USER_CHANNEL)
288 #define NX_LLINK_PROV(_nx)    NXPROV_LLINK(NX_PROV(_nx))
289 
290 /*
291  * Nexus domain provider.
292  */
293 struct kern_nexus_domain_provider {
294 	STAILQ_ENTRY(kern_nexus_domain_provider) nxdom_prov_link;
295 	STAILQ_ENTRY(kern_nexus_domain_provider) nxdom_prov_detaching_link;
296 	char                    nxdom_prov_name[64];
297 	uuid_t                  nxdom_prov_uuid;
298 	uint64_t                nxdom_prov_gencnt;
299 	uint32_t                nxdom_prov_refcnt;
300 	uint32_t                nxdom_prov_flags;
301 	struct nxdom            *nxdom_prov_dom;
302 	struct kern_nexus_domain_provider_init nxdom_prov_ext;
303 	/*
304 	 * The callbacks are grouped together to simplify the
305 	 * initialization of external domain providers; see
306 	 * kern_nexus_register_domain_provider() for details.
307 	 */
308 	struct nxdom_prov_cb {
309 		int (*dp_cb_init)(struct kern_nexus_domain_provider *);
310 		void (*dp_cb_fini)(struct kern_nexus_domain_provider *);
311 		int (*dp_cb_params)(struct kern_nexus_domain_provider *,
312 		    const uint32_t, const struct nxprov_params *,
313 		    struct nxprov_params *,
314 		    struct skmem_region_params[SKMEM_REGIONS], uint32_t);
315 		int (*dp_cb_mem_new)(struct kern_nexus_domain_provider *,
316 		    struct kern_nexus *, struct nexus_adapter *);
317 		int (*dp_cb_config)(struct kern_nexus_domain_provider *,
318 		    struct kern_nexus *, struct nx_cfg_req *, int,
319 		    struct proc *, kauth_cred_t);
320 		int (*dp_cb_nx_ctor)(struct kern_nexus *);
321 		void (*dp_cb_nx_dtor)(struct kern_nexus *);
322 		int (*dp_cb_nx_mem_info)(struct kern_nexus *,
323 		    struct kern_pbufpool **, struct kern_pbufpool **);
324 		size_t (*dp_cb_nx_mib_get)(struct kern_nexus *,
325 		    struct nexus_mib_filter *, void *, size_t, struct proc *);
326 		int (*dp_cb_nx_stop)(struct kern_nexus *);
327 	} nxdom_prov_cb;
328 #define nxdom_prov_init         nxdom_prov_cb.dp_cb_init
329 #define nxdom_prov_fini         nxdom_prov_cb.dp_cb_fini
330 #define nxdom_prov_params       nxdom_prov_cb.dp_cb_params
331 #define nxdom_prov_mem_new      nxdom_prov_cb.dp_cb_mem_new
332 #define nxdom_prov_config       nxdom_prov_cb.dp_cb_config
333 #define nxdom_prov_nx_ctor      nxdom_prov_cb.dp_cb_nx_ctor
334 #define nxdom_prov_nx_dtor      nxdom_prov_cb.dp_cb_nx_dtor
335 #define nxdom_prov_nx_mem_info  nxdom_prov_cb.dp_cb_nx_mem_info
336 #define nxdom_prov_nx_mib_get   nxdom_prov_cb.dp_cb_nx_mib_get
337 #define nxdom_prov_nx_stop      nxdom_prov_cb.dp_cb_nx_stop
338 };
339 
340 #define NXDOMPROVF_INITIALIZED  0x1     /* provider has been initialized */
341 #define NXDOMPROVF_ATTACHED     0x2     /* provider is attached to a domain */
342 #define NXDOMPROVF_DETACHING    0x4     /* provider is being detached */
343 #define NXDOMPROVF_EXT          0x8     /* external provider */
344 #define NXDOMPROVF_EXT_INITED   0x10    /* nxpi_init() succeeded */
345 #define NXDOMPROVF_DEFAULT      0x20    /* default provider for domain */
346 
/*
 * Default/minimum/maximum bounds for a nexus parameter; accessed via
 * the NXDOM_DEF()/NXDOM_MIN()/NXDOM_MAX() macros below.
 */
struct nxp_bounds {
	uint32_t        nb_def;         /* default value */
	uint32_t        nb_min;         /* minimum allowed value */
	uint32_t        nb_max;         /* maximum allowed value */
};
352 
353 /*
354  * Nexus domain.
355  *
356  * Each Nexus type is represented by a Nexus domain; there can
357  * be more than one providers for a given domain.
358  */
359 struct nxdom {
360 	STAILQ_ENTRY(nxdom) nxdom_link;
361 	STAILQ_HEAD(, kern_nexus_domain_provider) nxdom_prov_head;
362 	nexus_type_t    nxdom_type;
363 	nexus_meta_type_t nxdom_md_type;
364 	nexus_meta_subtype_t nxdom_md_subtype;
365 	uint32_t        nxdom_flags;
366 	struct nxp_bounds nxdom_ports;
367 	struct nxp_bounds nxdom_tx_rings;
368 	struct nxp_bounds nxdom_rx_rings;
369 	struct nxp_bounds nxdom_tx_slots;
370 	struct nxp_bounds nxdom_rx_slots;
371 	struct nxp_bounds nxdom_buf_size;
372 	struct nxp_bounds nxdom_large_buf_size;
373 	struct nxp_bounds nxdom_meta_size;
374 	struct nxp_bounds nxdom_stats_size;
375 	struct nxp_bounds nxdom_pipes;
376 	struct nxp_bounds nxdom_extensions;
377 	struct nxp_bounds nxdom_mhints;
378 	struct nxp_bounds nxdom_flowadv_max;
379 	struct nxp_bounds nxdom_nexusadv_size;
380 	struct nxp_bounds nxdom_capabilities;
381 	struct nxp_bounds nxdom_qmap;
382 	struct nxp_bounds nxdom_max_frags;
383 	struct skmem_region_params nxdom_region_params[SKMEM_REGIONS];
384 	const char      *nxdom_name;
385 
386 	/*
387 	 * Nexus domain callbacks.
388 	 */
389 	void (*nxdom_init)(struct nxdom *);             /* optional */
390 	void (*nxdom_terminate)(struct nxdom *);        /* optional */
391 	void (*nxdom_fini)(struct nxdom *);             /* optional */
392 	int (*nxdom_find_port)                          /* optional */
393 	(struct kern_nexus *, boolean_t, nexus_port_t *);
394 	boolean_t (*nxdom_port_is_reserved)             /* optional */
395 	(struct kern_nexus *, nexus_port_t);
396 	int (*nxdom_bind_port)                          /* required */
397 	(struct kern_nexus *, nexus_port_t *, struct nxbind *, void *);
398 	int (*nxdom_unbind_port)                        /* required */
399 	(struct kern_nexus *, nexus_port_t);
400 	int (*nxdom_connect)                            /* required */
401 	(struct kern_nexus_domain_provider *, struct kern_nexus *,
402 	struct kern_channel *, struct chreq *, struct kern_channel *,
403 	struct nxbind *, struct proc *);
404 	void (*nxdom_disconnect)                        /* required */
405 	(struct kern_nexus_domain_provider *, struct kern_nexus *,
406 	struct kern_channel *);
407 	void (*nxdom_defunct)                           /* required */
408 	(struct kern_nexus_domain_provider *, struct kern_nexus *,
409 	struct kern_channel *, struct proc *);
410 	void (*nxdom_defunct_finalize)                  /* required */
411 	(struct kern_nexus_domain_provider *, struct kern_nexus *,
412 	struct kern_channel *, boolean_t);
413 };
414 
415 #define NEXUSDOMF_INITIALIZED   0x1     /* domain has been initialized */
416 #define NEXUSDOMF_ATTACHED      0x2     /* domain is globally attached */
417 #define NEXUSDOMF_TERMINATED    0x4     /* domain has been terminated */
418 
419 #define NXDOM_DEF(_dom, var)    ((_dom)->nxdom_##var.nb_def)
420 #define NXDOM_MIN(_dom, var)    ((_dom)->nxdom_##var.nb_min)
421 #define NXDOM_MAX(_dom, var)    ((_dom)->nxdom_##var.nb_max)
422 
423 extern struct nexus_controller kernnxctl;
424 extern struct nexus_controller usernxctl;
425 extern lck_grp_t nexus_lock_group;
426 extern lck_grp_t nexus_mbq_lock_group;
427 extern lck_grp_t nexus_pktq_lock_group;
428 extern lck_attr_t nexus_lock_attr;
429 extern kern_allocation_name_t skmem_tag_nx_key;
430 extern kern_allocation_name_t skmem_tag_nx_port_info;
431 
432 extern struct kern_nexus_domain_provider *nxdom_prov_default[NEXUS_TYPE_MAX];
433 
434 #define NX_SHARED_NXCTL_INSTANCE(_nxctl)        \
435     ((_nxctl) == kernnxctl.ncd_nxctl)
436 
437 #define NXCTL_LOCK(_nxctl)      do {                    \
438 	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {      \
439 	        lck_mtx_lock(&((_nxctl)->nxctl_lock));  \
440 	} else {                                        \
441 	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock), \
442 	            LCK_MTX_ASSERT_NOTOWNED);           \
443 	}                                               \
444 } while (0)
445 
446 #define NXCTL_UNLOCK(_nxctl)    do {                    \
447 	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {      \
448 	        lck_mtx_unlock(&((_nxctl)->nxctl_lock));\
449 	}                                               \
450 	LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock),         \
451 	    LCK_MTX_ASSERT_NOTOWNED);                   \
452 } while (0)
453 
454 #define NXCTL_LOCK_ASSERT_HELD(_nxctl)  do {            \
455 	if (!NX_SHARED_NXCTL_INSTANCE((_nxctl))) {      \
456 	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock), \
457 	            LCK_MTX_ASSERT_OWNED);              \
458 	} else {                                        \
459 	        LCK_MTX_ASSERT(&((_nxctl)->nxctl_lock), \
460 	            LCK_MTX_ASSERT_NOTOWNED);           \
461 	}                                               \
462 } while (0)
463 
464 __BEGIN_DECLS
465 extern int nexus_init(void);
466 extern void nexus_fini(void);
467 
468 extern struct kern_nexus *nx_create(struct nxctl *, const uuid_t,
469     const nexus_type_t, const void *, nexus_ctx_release_fn_t,
470     struct kern_pbufpool *, struct kern_pbufpool *, int *);
471 extern void nx_retain(struct kern_nexus *);
472 extern void nx_retain_locked(struct kern_nexus *);
473 extern int nx_release(struct kern_nexus *);
474 extern int nx_release_locked(struct kern_nexus *);
475 extern void nx_detach(struct kern_nexus *);
476 extern void nx_stop(struct kern_nexus *nx);
477 extern int nx_close(struct kern_nexus *, boolean_t);
478 extern int nx_destroy(struct nxctl *, const uuid_t);
479 extern struct kern_nexus *nx_find(const uuid_t, boolean_t);
480 extern int nx_advisory_alloc(struct kern_nexus *, const char *,
481     struct skmem_region_params *, nexus_advisory_type_t);
482 extern void nx_advisory_free(struct kern_nexus *);
483 extern int nx_port_find(struct kern_nexus *, nexus_port_t,
484     nexus_port_t, nexus_port_t *);
485 extern int nx_port_alloc(struct kern_nexus *, nexus_port_t,
486     struct nxbind *, struct nexus_adapter **, struct proc *);
487 extern int nx_port_bind(struct kern_nexus *, nexus_port_t,
488     struct nxbind *);
489 extern int nx_port_bind_info(struct kern_nexus *, nexus_port_t,
490     struct nxbind *, void *);
491 extern int nx_port_unbind(struct kern_nexus *, nexus_port_t);
492 extern struct nexus_adapter *nx_port_get_na(struct kern_nexus *,
493     nexus_port_t);
494 extern int nx_port_get_info(struct kern_nexus *, nexus_port_t,
495     nx_port_info_type_t, void *__sized_by(len), uint32_t len);
496 extern void nx_port_defunct(struct kern_nexus *, nexus_port_t);
497 extern void nx_port_free(struct kern_nexus *, nexus_port_t);
498 extern void nx_port_free_all(struct kern_nexus *);
499 extern bool nx_port_is_valid(struct kern_nexus *, nexus_port_t);
500 extern bool nx_port_is_defunct(struct kern_nexus *, nexus_port_t);
501 extern void nx_port_foreach(struct kern_nexus *, void (^)(nexus_port_t));
502 extern void nx_interface_advisory_notify(struct kern_nexus *);
503 
504 extern struct nxctl *nxctl_create(struct proc *, struct fileproc *,
505     const uuid_t, int *);
506 extern void nxctl_close(struct nxctl *);
507 extern void nxctl_traffic_rule_clean(struct nxctl *);
508 extern void nxctl_traffic_rule_init(void);
509 extern void nxctl_traffic_rule_fini(void);
510 extern int nxctl_inet_traffic_rule_find_qset_id_with_pkt(const char *,
511     struct __kern_packet *, uint64_t *);
512 extern int nxctl_inet_traffic_rule_find_qset_id(const char *,
513     struct ifnet_traffic_descriptor_inet *, uint64_t *);
514 extern int nxctl_inet_traffic_rule_get_count(const char *, uint32_t *);
515 extern int nxctl_get_opt(struct nxctl *, struct sockopt *);
516 extern int nxctl_set_opt(struct nxctl *, struct sockopt *);
517 extern void nxctl_retain(struct nxctl *);
518 extern int nxctl_release(struct nxctl *);
519 extern void nxctl_dtor(struct nxctl *);
520 
521 extern int nxprov_advise_connect(struct kern_nexus *, struct kern_channel *,
522     struct proc *p);
523 extern void nxprov_advise_disconnect(struct kern_nexus *,
524     struct kern_channel *);
525 extern struct kern_nexus_provider *nxprov_create(struct proc *,
526     struct nxctl *, struct nxprov_reg *, int *);
527 extern struct kern_nexus_provider *nxprov_create_kern(struct nxctl *,
528     struct kern_nexus_domain_provider *, struct nxprov_reg *,
529     const struct kern_nexus_provider_init *init, int *err);
530 extern int nxprov_close(struct kern_nexus_provider *, boolean_t);
531 extern int nxprov_destroy(struct nxctl *, const uuid_t);
532 extern void nxprov_retain(struct kern_nexus_provider *);
533 extern int nxprov_release(struct kern_nexus_provider *);
534 extern struct nxprov_params *nxprov_params_alloc(zalloc_flags_t);
535 extern void nxprov_params_free(struct nxprov_params *);
536 
/*
 * Out-parameter bundle handed to the adjust_fn callback of
 * nxprov_params_adjust(); each pointer refers to a parameter the
 * callback may modify in place.
 */
struct nxprov_adjusted_params {
	nexus_meta_subtype_t *adj_md_subtype;   /* metadata subtype */
	uint32_t *adj_stats_size;               /* statistics region size */
	uint32_t *adj_flowadv_max;              /* max flow-advisory entries */
	uint32_t *adj_nexusadv_size;            /* nexus advisory size */
	uint32_t *adj_caps;                     /* capability bits */
	uint32_t *adj_tx_rings;                 /* TX ring count */
	uint32_t *adj_rx_rings;                 /* RX ring count */
	uint32_t *adj_tx_slots;                 /* slots per TX ring */
	uint32_t *adj_rx_slots;                 /* slots per RX ring */
	uint32_t *adj_alloc_rings;              /* alloc ring count */
	uint32_t *adj_free_rings;               /* free ring count */
	uint32_t *adj_alloc_slots;              /* slots per alloc ring */
	uint32_t *adj_free_slots;               /* slots per free ring */
	uint32_t *adj_buf_size;                 /* buffer size */
	uint32_t *adj_buf_region_segment_size;  /* buffer region segment size */
	uint32_t *adj_pp_region_config_flags;   /* packet-pool region config flags */
	uint32_t *adj_max_frags;                /* max buflets per packet */
	uint32_t *adj_event_rings;              /* event ring count */
	uint32_t *adj_event_slots;              /* slots per event ring */
	uint32_t *adj_max_buffers;              /* max buffers */
	uint32_t *adj_large_buf_size;           /* large buffer size */
};
560 
561 extern int nxprov_params_adjust(struct kern_nexus_domain_provider *,
562     const uint32_t, const struct nxprov_params *, struct nxprov_params *,
563     struct skmem_region_params[SKMEM_REGIONS], const struct nxdom *,
564     const struct nxdom *, const struct nxdom *, uint32_t,
565     int (*adjust_fn)(const struct kern_nexus_domain_provider *,
566     const struct nxprov_params *, struct nxprov_adjusted_params *));
567 
568 extern void nxdom_attach_all(void);
569 extern void nxdom_detach_all(void);
570 extern struct nxdom *nxdom_find(nexus_type_t);
571 
572 extern struct kern_nexus_domain_provider *nxdom_prov_find(
573 	const struct nxdom *, const char *);
574 extern struct kern_nexus_domain_provider *nxdom_prov_find_uuid(const uuid_t);
575 extern int nxdom_prov_add(struct nxdom *, struct kern_nexus_domain_provider *);
576 extern void nxdom_prov_del(struct kern_nexus_domain_provider *);
577 extern void nxdom_prov_retain_locked(struct kern_nexus_domain_provider *);
578 extern void nxdom_prov_retain(struct kern_nexus_domain_provider *);
579 extern boolean_t nxdom_prov_release_locked(struct kern_nexus_domain_provider *);
580 extern boolean_t nxdom_prov_release(struct kern_nexus_domain_provider *);
581 extern int nxdom_prov_validate_params(struct kern_nexus_domain_provider *,
582     const struct nxprov_reg *, struct nxprov_params *,
583     struct skmem_region_params[SKMEM_REGIONS], const uint32_t, uint32_t);
584 
585 extern struct nxbind *nxb_alloc(zalloc_flags_t);
586 extern void nxb_free(struct nxbind *);
587 extern boolean_t nxb_is_equal(struct nxbind *, struct nxbind *);
588 extern void nxb_move(struct nxbind *, struct nxbind *);
589 
590 typedef void kern_nexus_walktree_f_t(struct kern_nexus *, void *);
591 extern void kern_nexus_walktree(kern_nexus_walktree_f_t *, void *, boolean_t);
592 
593 extern int kern_nexus_get_pbufpool_info(const uuid_t nx_uuid,
594     struct kern_pbufpool_memory_info *rx_pool,
595     struct kern_pbufpool_memory_info *tx_pool);
596 __END_DECLS
597 
598 #include <skywalk/nexus/nexus_adapter.h>
599 
600 __attribute__((always_inline))
601 static inline int
nx_sync_tx(struct __kern_channel_ring * kring,boolean_t commit)602 nx_sync_tx(struct __kern_channel_ring *kring, boolean_t commit)
603 {
604 	struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
605 
606 	ASSERT(kring->ckr_tx == NR_TX);
607 	if (nxprov->nxprov_ext.nxpi_sync_tx != NULL) {
608 		return nxprov->nxprov_ext.nxpi_sync_tx(nxprov,
609 		           KRNA(kring)->na_nx, kring,
610 		           (commit ? KERN_NEXUS_SYNCF_COMMIT : 0));
611 	} else {
612 		return 0;
613 	}
614 }
615 
616 __attribute__((always_inline))
617 static inline int
nx_sync_rx(struct __kern_channel_ring * kring,boolean_t commit)618 nx_sync_rx(struct __kern_channel_ring *kring, boolean_t commit)
619 {
620 	struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
621 
622 	ASSERT(kring->ckr_tx == NR_RX);
623 	if (nxprov->nxprov_ext.nxpi_sync_rx != NULL) {
624 		return nxprov->nxprov_ext.nxpi_sync_rx(nxprov,
625 		           KRNA(kring)->na_nx, kring,
626 		           (commit ? KERN_NEXUS_SYNCF_COMMIT : 0));
627 	} else {
628 		return 0;
629 	}
630 }
631 
632 __attribute__((always_inline))
633 static __inline__ void
nx_tx_doorbell(struct __kern_channel_ring * kring,boolean_t async)634 nx_tx_doorbell(struct __kern_channel_ring *kring, boolean_t async)
635 {
636 	struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
637 
638 	ASSERT(kring->ckr_tx == NR_TX);
639 	ASSERT(nxprov->nxprov_ext.nxpi_tx_doorbell != NULL);
640 	nxprov->nxprov_ext.nxpi_tx_doorbell(nxprov, KRNA(kring)->na_nx,
641 	    kring, (async ? KERN_NEXUS_TXDOORBELLF_ASYNC_REFILL: 0));
642 }
643 
644 __attribute__((always_inline))
645 static inline int
nx_rx_sync_packets(struct __kern_channel_ring * kring,uint64_t * __counted_by (* count)packets,uint32_t * count)646 nx_rx_sync_packets(struct __kern_channel_ring *kring,
647     uint64_t *__counted_by(*count)packets, uint32_t *count)
648 {
649 	struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
650 
651 	ASSERT(kring->ckr_tx == NR_RX);
652 	if (nxprov->nxprov_ext.nxpi_rx_sync_packets != NULL) {
653 		return nxprov->nxprov_ext.nxpi_rx_sync_packets(nxprov,
654 		           KRNA(kring)->na_nx, kring, packets, count, 0);
655 	} else {
656 		return 0;
657 	}
658 }
659 
660 __attribute__((always_inline))
661 static inline boolean_t
nx_has_rx_sync_packets(struct __kern_channel_ring * kring)662 nx_has_rx_sync_packets(struct __kern_channel_ring *kring)
663 {
664 	struct kern_nexus_provider *nxprov = NX_PROV(KRNA(kring)->na_nx);
665 
666 	ASSERT(kring->ckr_tx == NR_RX);
667 	return nxprov->nxprov_ext.nxpi_rx_sync_packets != NULL;
668 }
669 
670 __attribute__((always_inline))
671 static __inline__ errno_t
nx_tx_qset_notify(struct kern_nexus * nx,void * qset_ctx)672 nx_tx_qset_notify(struct kern_nexus *nx, void *qset_ctx)
673 {
674 	struct kern_nexus_provider *nxprov = NX_PROV(nx);
675 	sk_protect_t protect;
676 	errno_t err;
677 
678 	ASSERT(nxprov->nxprov_netif_ext.nxnpi_tx_qset_notify != NULL);
679 	protect = sk_tx_notify_protect();
680 	err = nxprov->nxprov_netif_ext.nxnpi_tx_qset_notify(nxprov, nx,
681 	    qset_ctx, 0);
682 	sk_tx_notify_unprotect(protect);
683 	return err;
684 }
685 #endif /* BSD_KERNEL_PRIVATE */
686 #endif /* _SKYWALK_NEXUS_NEXUSVAR_H_ */
687