1 /*
2 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (C) 2012-2014 Matteo Landi, Luigi Rizzo, Giuseppe Lettieri.
31 * All rights reserved.
32 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
33 *
34 * Redistribution and use in source and binary forms, with or without
35 * modification, are permitted provided that the following conditions
36 * are met:
37 * 1. Redistributions of source code must retain the above copyright
38 * notice, this list of conditions and the following disclaimer.
39 * 2. Redistributions in binary form must reproduce the above copyright
40 * notice, this list of conditions and the following disclaimer in the
41 * documentation and/or other materials provided with the distribution.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
47 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 */
55
56 #ifndef _SKYWALK_VAR_H_
57 #define _SKYWALK_VAR_H_
58
59 #ifdef BSD_KERNEL_PRIVATE
60 #include <stdint.h>
61 #include <sys/param.h>
62 #include <sys/systm.h>
63 #include <sys/types.h>
64 #include <sys/cdefs.h>
65 #include <sys/errno.h>
66 #include <sys/malloc.h>
67 #include <sys/mbuf.h>
68 #include <sys/protosw.h>
69 #include <sys/queue.h>
70 #include <sys/tree.h>
71 #include <sys/sysctl.h>
72 #include <sys/poll.h>
73 #include <sys/priv.h>
74 #include <sys/random.h>
75 #include <sys/kernel.h>
76 #include <sys/guarded.h>
77 #include <uuid/uuid.h>
78 #include <kern/bits.h>
79 #include <kern/locks.h>
80 #include <kern/task.h>
81 #include <kern/thread.h>
82 #include <kern/zalloc.h>
83 #include <mach/boolean.h>
84 #include <machine/atomic.h>
85 #include <machine/endian.h>
86 #include <netinet/ip.h>
87 #include <net/dlil.h>
88 #include <net/necp.h>
89 #include <libkern/libkern.h>
90 #include <libkern/OSAtomic.h>
91 #include <libkern/OSByteOrder.h>
92 #include <IOKit/skywalk/IOSkywalkSupport.h>
93 #include <skywalk/os_nexus_private.h>
94 #include <skywalk/os_channel_private.h>
95 #include <skywalk/namespace/netns.h>
96 #include <skywalk/namespace/protons.h>
97 #include <skywalk/namespace/flowidns.h>
98 #include <vm/vm_kern.h>
99 #include <san/kasan.h>
100
101 /*
102 * General byte order swapping functions.
103 */
104 #define bswap16(x) OSSwapInt16(x)
105 #define bswap32(x) OSSwapInt32(x)
106 #define bswap64(x) OSSwapInt64(x)
107
108 /*
109 * Atomic operations.
110 */
111 #define SK_ATOMIC_TEST_AND_SET(p) (!os_atomic_cmpxchg((p), 0, 1, acq_rel))
112 #define SK_ATOMIC_CLEAR(p) os_atomic_store((p), 0, release)
113
114 /*
115 * feature bits defined in os_skywalk_private.h
116 */
117 extern uint64_t sk_features;
118
119 SYSCTL_DECL(_kern_skywalk);
120 SYSCTL_DECL(_kern_skywalk_stats);
121
122 #define SK_LOCK() \
123 lck_mtx_lock(&sk_lock)
124 #define SK_LOCK_TRY() \
125 lck_mtx_try_lock(&sk_lock)
126 #define SK_LOCK_ASSERT_HELD() \
127 LCK_MTX_ASSERT(&sk_lock, LCK_MTX_ASSERT_OWNED)
128 #define SK_LOCK_ASSERT_NOTHELD() \
129 LCK_MTX_ASSERT(&sk_lock, LCK_MTX_ASSERT_NOTOWNED)
130 #define SK_UNLOCK() \
131 lck_mtx_unlock(&sk_lock)
132
133 decl_lck_mtx_data(extern, sk_lock);
134 extern lck_grp_t sk_lock_group;
135 extern lck_attr_t sk_lock_attr;
136
137 /*
138 * Ring Types.
139 */
140 enum txrx {
141 NR_RX = 0, /* RX only */
142 NR_TX = 1, /* TX only */
143 NR_TXRX, /* RX+TX (alias) */
144 NR_A = NR_TXRX, /* alloc only */
145 NR_F, /* free only */
146 NR_TXRXAF, /* alloc+free (alias) */
147 NR_EV = NR_TXRXAF, /* event only */
148 NR_LBA, /* large buf alloc */
149 NR_ALL /* all of the above */
150 };
151
152 __attribute__((always_inline))
153 static inline const char *
sk_ring2str(enum txrx t)154 sk_ring2str(enum txrx t)
155 {
156 switch (t) {
157 case NR_TX:
158 return "TX";
159 case NR_RX:
160 return "RX";
161 case NR_A:
162 return "ALLOC";
163 case NR_F:
164 return "FREE";
165 case NR_EV:
166 return "EVENT";
167 case NR_LBA:
168 return "LARGE ALLOC";
169 default:
170 VERIFY(0);
171 /* NOTREACHED */
172 __builtin_unreachable();
173 }
174 }
175
176 __attribute__((always_inline))
177 static inline enum txrx
sk_txrx_swap(enum txrx t)178 sk_txrx_swap(enum txrx t)
179 {
180 return t == NR_RX ? NR_TX : NR_RX;
181 }
182
/* iterate t over the TX/RX ring types only (NR_RX, NR_TX) */
#define for_rx_tx(t)    for ((t) = 0; (t) < NR_TXRX; (t)++)
/* iterate t over the alloc and free ring types (NR_A, NR_F) */
#define for_a_f(t)      for ((t) = NR_A; (t) <= NR_F; (t)++)
/* iterate t over every ring type */
#define for_all_rings(t)        for ((t) = 0; (t) < NR_ALL; (t)++)
186
/*
 * Return the slot index following i in a ring whose highest valid
 * index is lim, wrapping around to slot 0 past the end.
 */
__attribute__((always_inline))
static inline uint32_t
SLOT_NEXT(uint32_t i, uint32_t lim)
{
	if (__improbable(i == lim)) {
		return 0;
	}
	return i + 1;
}
194
/*
 * Return the slot index preceding i, wrapping around to the highest
 * valid index (lim) when stepping back from slot 0.
 */
__attribute__((always_inline))
static inline uint32_t
SLOT_PREV(uint32_t i, uint32_t lim)
{
	if (__improbable(i == 0)) {
		return lim;
	}
	return i - 1;
}
202
/*
 * Return index i advanced by n, with wraparound.  The single
 * subtraction on wrap is only correct when n does not exceed the ring
 * size (lim + 1); callers must honor that precondition.
 *
 * Marked always_inline for consistency with SLOT_NEXT()/SLOT_PREV().
 */
__attribute__((always_inline))
static inline uint32_t
SLOT_INCREMENT(uint32_t i, uint32_t n, uint32_t lim)
{
	i += n;
	return __improbable(i > lim) ? i - lim - 1 : i;
}
210
211 /*
212 * Nexus metadata.
213 */
214 #define NX_METADATA_QUANTUM_SZ \
215 (MAX(sizeof (struct __user_quantum), sizeof (struct __kern_quantum)))
216 #define NX_METADATA_PACKET_SZ(_n) \
217 (MAX(_USER_PACKET_SIZE(_n), _KERN_PACKET_SIZE(_n)))
218
219 /* {min,max} internal user metadata object size */
220 #define NX_METADATA_OBJ_MIN_SZ \
221 (METADATA_PREAMBLE_SZ + NX_METADATA_QUANTUM_SZ)
222 #define NX_METADATA_OBJ_MAX_SZ 512
223
224 /* {min,max} client metadata size */
225 #define NX_METADATA_USR_MIN_SZ 0
226 #define NX_METADATA_USR_MAX_SZ \
227 (NX_METADATA_OBJ_MAX_SZ - NX_METADATA_OBJ_MIN_SZ)
228
229 /*
230 * User-visible statistics.
231 */
232 #define NX_STATS_MIN_SZ 0
233 #define NX_STATS_MAX_SZ (16 * 1024)
234
235 /*
236 * Flow advisory entries.
237 */
238 #define NX_FLOWADV_DEFAULT 512
239 #define NX_FLOWADV_MAX (64 * 1024)
240 #define FO_FLOWADV_CHUNK 64
241
242 /*
243 * Nexus advisory.
244 */
245 #define NX_NEXUSADV_MAX_SZ (16 * 1024)
246
247 /* {min,max} number of ring pairs in a nexus */
248 #define NX_MIN_NUM_RING_PAIR 1
249 #define NX_MAX_NUM_RING_PAIR 8 /* xxx unclear how many */
250 #define NX_MIN_NUM_RING (NX_MIN_NUM_RING_PAIR * 2)
251 #define NX_MAX_NUM_RING (NX_MAX_NUM_RING_PAIR * 2)
252
253 #define NX_MIN_NUM_SLOT_PER_RING 2
254 #define NX_MAX_NUM_SLOT_PER_RING (16 * 1024)
255
256 #define NX_MIN_BUF_OBJ_SIZE 64
257 #define NX_MAX_BUF_OBJ_SIZE (64 * 1024)
258
259 #define NX_PBUF_FRAGS_MIN 1
260 #define NX_PBUF_FRAGS_DEFAULT NX_PBUF_FRAGS_MIN
261 #define NX_PBUF_FRAGS_MAX 64
262
263 #define NX_MAX_AGGR_PKT_SIZE IP_MAXPACKET /* max aggregated pkt size */
264
265 /*
266 * Compat netif transmit models.
267 */
268 /* uses default parameters as set by driver */
269 #define NETIF_COMPAT_TXMODEL_DEFAULT 0
270 /* override driver parameters and force IFEF_ENQUEUE_MULTI */
271 #define NETIF_COMPAT_TXMODEL_ENQUEUE_MULTI 1
272
273 /*
274 * Native netif transmit models.
275 */
276 /* uses default parameters as set by driver */
277 #define NETIF_NATIVE_TXMODEL_DEFAULT 0
278 /* override driver parameters and force IFEF_ENQUEUE_MULTI */
279 #define NETIF_NATIVE_TXMODEL_ENQUEUE_MULTI 1
280
281 #define _timerisset(tvp) ((tvp)->tv_sec || (tvp)->tv_nsec)
282 #define _timersub(tvp, uvp, vvp) do { \
283 (vvp)->tv_sec = (tvp)->tv_sec - (uvp)->tv_sec; \
284 (vvp)->tv_nsec = (tvp)->tv_nsec - (uvp)->tv_nsec; \
285 if ((vvp)->tv_nsec < 0) { \
286 (vvp)->tv_sec--; \
287 (vvp)->tv_nsec += NSEC_PER_SEC; \
288 } \
289 } while (0)
290 #define _timernsec(tvp, nsp) do { \
291 *(nsp) = (tvp)->tv_nsec; \
292 if ((tvp)->tv_sec > 0) \
293 *(nsp) += ((tvp)->tv_sec * NSEC_PER_SEC); \
294 } while (0)
295
296 struct nexus_adapter;
297 struct kern_pbufpool;
298
299 extern uint32_t sk_opp_defunct;
300 extern uint32_t sk_cksum_tx;
301 extern uint32_t sk_cksum_rx;
302 extern uint32_t sk_guard;
303 extern uint32_t sk_headguard_sz;
304 extern uint32_t sk_tailguard_sz;
305
306 #if (DEVELOPMENT || DEBUG)
307 extern uint32_t sk_txring_sz;
308 extern uint32_t sk_rxring_sz;
309 extern uint32_t sk_net_txring_sz;
310 extern uint32_t sk_net_rxring_sz;
#endif /* DEVELOPMENT || DEBUG */
312
313 extern uint32_t sk_max_flows;
314 extern uint32_t sk_fadv_nchunks;
315 extern uint32_t sk_netif_compat_txmodel;
316 extern uint32_t sk_netif_native_txmodel;
317 extern uint16_t sk_tx_delay_qlen;
318 extern uint16_t sk_tx_delay_timeout;
319 extern uint32_t sk_netif_compat_aux_cell_tx_ring_sz;
320 extern uint32_t sk_netif_compat_aux_cell_rx_ring_sz;
321 extern uint32_t sk_netif_compat_wap_tx_ring_sz;
322 extern uint32_t sk_netif_compat_wap_rx_ring_sz;
323 extern uint32_t sk_netif_compat_awdl_tx_ring_sz;
324 extern uint32_t sk_netif_compat_awdl_rx_ring_sz;
325 extern uint32_t sk_netif_compat_wif_tx_ring_sz;
326 extern uint32_t sk_netif_compat_wif_rx_ring_sz;
327 extern uint32_t sk_netif_compat_usb_eth_tx_ring_sz;
328 extern uint32_t sk_netif_compat_usb_eth_rx_ring_sz;
329 extern int sk_netif_compat_rx_mbq_limit;
330 extern char sk_ll_prefix[IFNAMSIZ];
331 extern uint32_t sk_fsw_rx_agg_tcp;
332 extern uint32_t sk_fsw_tx_agg_tcp;
333 extern uint32_t sk_fsw_gso_mtu;
334
/*
 * RX TCP aggregation modes for traffic handed to the host stack;
 * controls the sk_fsw_rx_agg_tcp_host tunable declared below.
 */
typedef enum fsw_rx_agg_tcp_host {
	SK_FSW_RX_AGG_TCP_HOST_OFF = 0,         /* aggregation disabled */
	SK_FSW_RX_AGG_TCP_HOST_ON = 1,          /* aggregation enabled */
	SK_FSW_RX_AGG_TCP_HOST_AUTO             /* mode chosen automatically */
} fsw_rx_agg_tcp_host_t;
340 extern uint32_t sk_fsw_rx_agg_tcp_host;
341 extern uint32_t sk_fsw_max_bufs;
342
/*
 * netif mitigation configuration; controls the sk_netif_tx_mit and
 * sk_netif_rx_mit tunables declared below.
 */
typedef enum netif_mit_cfg {
	SK_NETIF_MIT_FORCE_OFF = 0,     /* force mitigation OFF */
	SK_NETIF_MIT_FORCE_SIMPLE,      /* force mitigation ON (simple) */
	SK_NETIF_MIT_FORCE_ADVANCED,    /* force mitigation ON (advanced) */
	SK_NETIF_MIT_AUTO,              /* automatic (default) */
	SK_NETIF_MIT_MAX = SK_NETIF_MIT_AUTO,   /* highest valid value */
} netif_mit_cfg_t;
350 extern uint32_t sk_netif_tx_mit;
351 extern uint32_t sk_netif_rx_mit;
352 extern uint32_t sk_channel_buflet_alloc;
353 extern uint32_t sk_min_pool_size;
354 extern uint32_t sk_netif_queue_stat_enable;
355
356 struct sk_protect;
357 typedef const struct sk_protect *__single sk_protect_t;
358
359 __attribute__((always_inline))
360 static inline boolean_t
sk_is_sync_protected(void)361 sk_is_sync_protected(void)
362 {
363 return net_thread_is_marked(NET_THREAD_CHANNEL_SYNC) != 0;
364 }
365
366 __attribute__((always_inline))
367 static inline sk_protect_t
sk_sync_protect(void)368 sk_sync_protect(void)
369 {
370 return (sk_protect_t)(const void *)
371 net_thread_marks_push(NET_THREAD_CHANNEL_SYNC);
372 }
373
374
375 __attribute__((always_inline))
376 static inline boolean_t
sk_is_rx_notify_protected(void)377 sk_is_rx_notify_protected(void)
378 {
379 return net_thread_is_marked(NET_THREAD_RX_NOTIFY) != 0;
380 }
381
382 __attribute__((always_inline))
383 static inline sk_protect_t
sk_rx_notify_protect(void)384 sk_rx_notify_protect(void)
385 {
386 return (sk_protect_t)(const void *)
387 net_thread_marks_push(NET_THREAD_RX_NOTIFY);
388 }
389
390 __attribute__((always_inline))
391 static inline sk_protect_t
sk_tx_notify_protect(void)392 sk_tx_notify_protect(void)
393 {
394 return (sk_protect_t)(const void *)
395 net_thread_marks_push(NET_THREAD_TX_NOTIFY);
396 }
397
398 __attribute__((always_inline))
399 static inline boolean_t
sk_is_tx_notify_protected(void)400 sk_is_tx_notify_protected(void)
401 {
402 return net_thread_is_marked(NET_THREAD_TX_NOTIFY) != 0;
403 }
404
405 __attribute__((always_inline))
406 static inline boolean_t
sk_is_cache_update_protected(void)407 sk_is_cache_update_protected(void)
408 {
409 return net_thread_is_marked(NET_THREAD_CACHE_UPDATE) != 0;
410 }
411
412 __attribute__((always_inline))
413 static inline sk_protect_t
sk_cache_update_protect(void)414 sk_cache_update_protect(void)
415 {
416 return (sk_protect_t)(const void *)
417 net_thread_marks_push(NET_THREAD_CACHE_UPDATE);
418 }
419
420 __attribute__((always_inline))
421 static inline boolean_t
sk_is_region_update_protected(void)422 sk_is_region_update_protected(void)
423 {
424 return net_thread_is_marked(NET_THREAD_REGION_UPDATE) != 0;
425 }
426
427 __attribute__((always_inline))
428 static inline sk_protect_t
sk_region_update_protect(void)429 sk_region_update_protect(void)
430 {
431 return (sk_protect_t)(const void *)
432 net_thread_marks_push(NET_THREAD_REGION_UPDATE);
433 }
434
435 __attribute__((always_inline))
436 static inline boolean_t
sk_is_async_transmit_protected(void)437 sk_is_async_transmit_protected(void)
438 {
439 return net_thread_is_marked(NET_THREAD_AYSYNC_TX) != 0;
440 }
441
442 __attribute__((always_inline))
443 static inline sk_protect_t
sk_async_transmit_protect(void)444 sk_async_transmit_protect(void)
445 {
446 return (sk_protect_t)(const void *)
447 net_thread_marks_push(NET_THREAD_AYSYNC_TX);
448 }
449
450 #define sk_sync_unprotect sk_unprotect
451 #define sk_cache_update_unprotect sk_unprotect
452 #define sk_region_update_unprotect sk_unprotect
453 #define sk_tx_notify_unprotect sk_unprotect
454 #define sk_async_transmit_unprotect sk_unprotect
455
456 __attribute__((always_inline))
457 static inline void
sk_unprotect(sk_protect_t protect)458 sk_unprotect(sk_protect_t protect)
459 {
460 net_thread_marks_pop((net_thread_marks_t)(const void*)protect);
461 }
462
463
464
465 /*
466 * For sysctls that allocate a buffer to fill then copyout at completion,
467 * set an upper bound on the size of the buffer we'll allocate.
468 */
469 #define SK_SYSCTL_ALLOC_MAX ((size_t)(100 * 1024 * 1024))
470
#if (DEVELOPMENT || DEBUG)
typedef void (*_null_func_t)(void);
#define null_func ((_null_func_t)NULL)

extern uint32_t sk_inject_error_rmask;
/*
 * Error injection for testing: when bit _en is set in the injection
 * mask _ie, and a random rate-limit check against sk_inject_error_rmask
 * passes, bump the optional counter *_ej, invoke the optional callback
 * _f(...), and store error code _ec into _ev.
 */
#define _SK_INJECT_ERROR(_ie, _en, _ev, _ec, _ej, _f, ...) do {         \
	if (__improbable(((_ie) & (1ULL << (_en))) != 0)) {             \
	        if ((random() & sk_inject_error_rmask) !=               \
	            sk_inject_error_rmask)                              \
	                break;                                          \
	        if ((_ej) != NULL) (*(_ej))++;                          \
	        SK_DF(SK_VERB_ERROR_INJECT, "injecting error %d", (_en));\
	        if ((_f) != NULL)                                       \
	                (_f)(__VA_ARGS__);                              \
	        (_ev) = (_ec);                                          \
	}                                                               \
} while (0)
#else
/*
 * No-op in release builds.  Parameter list deliberately kept in sync
 * with the DEVELOPMENT/DEBUG variant above so call sites expand
 * identically under either configuration.
 */
#define _SK_INJECT_ERROR(_ie, _en, _ev, _ec, _ej, _f, ...)
#endif /* DEVELOPMENT || DEBUG */
491
492 __BEGIN_DECLS
493 extern int skywalk_init(void);
494 extern int skywalk_priv_check_cred(proc_t, kauth_cred_t, int);
495 extern int skywalk_priv_check_proc_cred(proc_t, int);
496 #if CONFIG_MACF
497 extern int skywalk_mac_system_check_proc_cred(proc_t, const char *);
498 #endif /* CONFIG_MACF */
499 extern int skywalk_nxctl_check_privileges(proc_t, kauth_cred_t);
500 extern boolean_t skywalk_check_platform_binary(proc_t);
501 extern boolean_t skywalk_netif_direct_allowed(const char *);
502 extern boolean_t skywalk_netif_direct_enabled(void);
503 extern void sk_gen_guard_id(boolean_t, const uuid_t, guardid_t *);
504 extern char *__counted_by(sizeof(uuid_string_t)) sk_uuid_unparse(const uuid_t,
505 uuid_string_t);
506 #if SK_LOG
507 #define SK_DUMP_BUF_SIZE 2048
508 extern const char *__counted_by(SK_DUMP_BUF_SIZE) sk_dump(const char *label,
509 const void *__sized_by(len) obj, int len, int dumplen);
510 extern const char *sk_proc_name_address(struct proc *);
511 extern const char *sk_proc_name(struct proc *);
512 extern int sk_proc_pid(struct proc *);
513
514 /* skywalk ntop function that follows privacy (IP redaction) setting */
515 extern const char * sk_ntop(int af, const void *addr,
516 char *__counted_by(addr_strlen)addr_str, size_t addr_strlen);
517 extern const char *sk_sa_ntop(struct sockaddr *sa,
518 char *__counted_by(addr_strlen)addr_str, size_t addr_strlen);
519 extern const char *sk_memstatus2str(uint32_t);
520 #endif /* SK_LOG */
521
522 extern bool sk_sa_has_addr(struct sockaddr *sa);
523 extern bool sk_sa_has_port(struct sockaddr *sa);
524 extern uint16_t sk_sa_get_port(struct sockaddr *sa);
525
526 extern void skywalk_kill_process(struct proc *, uint64_t);
527
/*
 * Reason codes for skywalk_kill_process(), identifying the consistency
 * violation that caused a misbehaving process to be terminated.
 * NOTE(review): these appear intended to be combined with the
 * SKYWALK_KILL_REASON_*_SYNC high-order flag bits defined below --
 * confirm against skywalk_kill_process() call sites.
 */
enum skywalk_kill_reason {
	SKYWALK_KILL_REASON_GENERIC = 0,
	SKYWALK_KILL_REASON_HEAD_OOB,
	SKYWALK_KILL_REASON_HEAD_OOB_WRAPPED,
	SKYWALK_KILL_REASON_CUR_OOB,
	SKYWALK_KILL_REASON_CUR_OOB_WRAPPED_1,
	SKYWALK_KILL_REASON_CUR_OOB_WRAPPED_2,
	SKYWALK_KILL_REASON_TAIL_MISMATCH,
	SKYWALK_KILL_REASON_BASIC_SANITY,
	SKYWALK_KILL_REASON_UNALLOCATED_PKT,
	SKYWALK_KILL_REASON_SLOT_NOT_DETACHED,
	SKYWALK_KILL_REASON_QUM_IDX_MISMATCH,
	SKYWALK_KILL_REASON_SYNC_FAILED,
	SKYWALK_KILL_REASON_INCONSISTENT_READY_BYTES,
	SKYWALK_KILL_REASON_BAD_BUFLET_CHAIN,
	SKYWALK_KILL_REASON_INTERNALIZE_FAILED,
};
545
546 #define SKYWALK_KILL_REASON_TX_SYNC 0x0000000000000000ULL
547 #define SKYWALK_KILL_REASON_EVENT_SYNC 0x1000000000000000ULL
548 #define SKYWALK_KILL_REASON_FREE_SYNC 0x2000000000000000ULL
549 #define SKYWALK_KILL_REASON_ALLOC_SYNC 0x4000000000000000ULL
550 #define SKYWALK_KILL_REASON_RX_SYNC 0x8000000000000000ULL
551
552 /* for convenience */
553 extern const char *proc_name_address(void *p);
554
555 /*
556 * skoid is the glue that holds the Skywalk struct model and sysctl properties
557 * together. It's supposed to be embedded in other Skywalk struct, for instance
 * channel, nexus, etc. skoid can hold a variable number of properties, which
559 * is automatically made available to the sysctl interface under the parent
560 * skoid sysctl node.
561 *
562 * The embedding struct should call skoid_create, which does the initialization
563 * and registration of the associated sysctl_oid under the parent node. All
564 * first level dynamic skoid nodes must hang under static sysctl nodes defined
565 * with traditional SYSCTL_NODE macro in linker set.
566 * skoid_create(1st_level_skoid, skoid_SNODE(_linker_sysctl), name, kind)
567 *
568 * The fields in embedding skoid can be expressed as properties of the skoid,
569 * or separate skoid, depending on the model. If the field is of primitive
570 * types, then properties should be used. If the field is of compound types
571 * (e.g. struct), another layer of skoid might be created under the parent.
572 *
573 * To add properties to the skoid, call one of the skoid_add_* functions.
574 * skoid_add_int(&skoid, name, flags, int_ptr)
575 * To add another skoid as child of a skoid, allocate and call skoid_create
576 * with the skoid_DNODE(parent_skoid) as parent argument.
577 * skoid_create(2+_level_skoid, skoid_DNODE(parent_skoid), name, kind)
578 *
579 * About life cycle: the embedding struct of skoid must outlive the skoid.
 * skoid itself stores a cached name, so there is no restriction on the name
 * buffer's life cycle. Property names should be const strings, or strings with
 * a longer life cycle than the skoid. Most often, the skoid has a variable name
 * reflecting the Skywalk struct name (e.g. "ms.en0"), while the properties have
 * fixed names matching the struct member variable names.
585 *
586 * Please use caution regarding access control of skoid properties.
587 */
588 #define SKOID_SNODE(static_parent) (&(sysctl_##static_parent##_children))
589 #define SKOID_DNODE(dynamic_parent) (&(dynamic_parent.sko_oid_list))
590 #define SKOID_NAME_SIZE 32
591
/*
 * Dynamic sysctl node embedded inside Skywalk structures; see the
 * block comment above for usage, hierarchy, and life-cycle rules.
 */
struct skoid {
	struct sysctl_oid_list sko_oid_list;    /* self sko_oid & properties */
	struct sysctl_oid sko_oid;              /* self sysctl oid storage */
	char sko_name[SKOID_NAME_SIZE];         /* skoid name (cached copy) */
};
597
598 extern void skoid_init(void);
599 extern void skoid_create(struct skoid *skoid, struct sysctl_oid_list *parent,
600 const char *name, int kind);
601 extern void skoid_add_int(struct skoid *skoid, const char *name, int flags,
602 int *ptr);
603 extern void skoid_add_uint(struct skoid *skoid, const char *name, int flags,
604 unsigned int *ptr);
605 extern void skoid_add_handler(struct skoid *skoid, const char *name, int kind,
606 int (*handler)SYSCTL_HANDLER_ARGS, void *arg1, int arg2);
607 extern void skoid_destroy(struct skoid *skoid);
608
609 /*
610 * To avoid accidentally invoking skoid procedure by `sysctl` tool, use this
611 * macro as guard, so proc is only called with a parameter, e.g.
612 * sysctl <skoid_proc_name>=1
613 */
614 #define SKOID_PROC_CALL_GUARD do { \
615 if (req->newptr == USER_ADDR_NULL) \
616 return (0); \
617 } while (0)
618
619 extern kern_allocation_name_t skmem_tag_oid;
620 extern kern_allocation_name_t skmem_tag_sysctl_buf;
621
622 __END_DECLS
623 #endif /* BSD_KERNEL_PRIVATE */
624 #endif /* _SKYWALK_VAR_H_ */
625