xref: /xnu-12377.81.4/bsd/skywalk/channel/os_channel.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_OS_CHANNEL_H_
30 #define _SKYWALK_OS_CHANNEL_H_
31 
32 #ifdef PRIVATE
33 
34 #include <stdint.h>
35 #include <sys/types.h>
36 #include <sys/cdefs.h>
37 #include <uuid/uuid.h>
38 #include <mach/vm_types.h>
39 #include <skywalk/os_nexus.h>
40 #include <skywalk/os_packet.h>
41 #ifndef KERNEL
42 #include <skywalk/os_channel_event.h>
43 #include <net/if_var.h>
44 #endif /* !KERNEL */
45 
/*
 * Compiler guards used by Libnetcore.
 */
#define OS_CHANNEL_HAS_NUM_BUFFERS_ATTR 1 /* CHANNEL_ATTR_NUM_BUFFERS */
#define OS_CHANNEL_HAS_LARGE_PACKET 1     /* CHANNEL_ATTR_LARGE_BUF_SIZE and */
                                          /* os_channel_large_packet_alloc() */
#define OS_CHANNEL_HAS_BUFFER_STATS 1     /* os_channel_get_upp_buffer_stats() */
53 
54 /* Flow advisory table index */
55 typedef uint32_t flowadv_idx_t;
56 #define FLOWADV_IDX_NONE                ((flowadv_idx_t)-1)
57 
58 /*
59  * Channel ring direction.
60  */
61 typedef enum {
62 	CHANNEL_DIR_TX_RX,      /* default: TX and RX ring(s) */
63 	CHANNEL_DIR_TX,         /* only TX ring(s) */
64 	CHANNEL_DIR_RX          /* only RX ring(s) */
65 } ring_dir_t;
66 
67 /*
68  * Channel ring ID.
69  */
70 typedef uint32_t ring_id_t;
71 #define CHANNEL_RING_ID_ANY             ((ring_id_t)-1)
72 
/*
 * Symbolic ring ID types resolvable via os_channel_ring_id(), which
 * returns the first/last usable TX and RX ring IDs for a channel.
 */
typedef enum {
	CHANNEL_FIRST_TX_RING,  /* ID of first usable TX ring */
	CHANNEL_LAST_TX_RING,   /* ID of last usable TX ring */
	CHANNEL_FIRST_RX_RING,  /* ID of first usable RX ring */
	CHANNEL_LAST_RX_RING    /* ID of last usable RX ring */
} ring_id_type_t;
79 
80 /* Sync mode values */
81 typedef enum {
82 	CHANNEL_SYNC_TX,        /* synchronize TX ring(s) */
83 	CHANNEL_SYNC_RX,        /* synchronize RX ring(s) */
84 #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE)
85 	CHANNEL_SYNC_UPP        /* synchronize packet pool rings only */
86 #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */
87 } sync_mode_t;
88 
89 /* Sync flags */
90 typedef uint32_t sync_flags_t;
91 #if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE)
92 #define CHANNEL_SYNCF_ALLOC        0x1     /* synchronize alloc ring */
93 #define CHANNEL_SYNCF_FREE         0x2     /* synchronize free ring */
94 #define CHANNEL_SYNCF_PURGE        0x4     /* purge user packet pool */
95 #define CHANNEL_SYNCF_ALLOC_BUF    0x8     /* synchronize buflet alloc ring */
96 #define CHANNEL_SYNCF_LARGE_ALLOC  0x10    /* synchronize large alloc ring */
97 #endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */
98 
99 /*
100  * Opaque handles.
101  */
102 struct channel;
103 struct channel_ring_desc;
104 struct __slot_desc;
105 struct channel_attr;
106 
107 typedef struct channel                  *channel_t;
108 typedef struct channel_ring_desc        *channel_ring_t;
109 typedef struct __slot_desc              *channel_slot_t;
110 typedef struct channel_attr             *channel_attr_t;
111 
112 /*
113  * Channel threshold unit types.
114  */
115 typedef enum {
116 	CHANNEL_THRESHOLD_UNIT_SLOTS,   /* unit in slots (default) */
117 	CHANNEL_THRESHOLD_UNIT_BYTES,   /* unit in bytes */
118 } channel_threshold_unit_t;
119 
120 /*
121  * Channel attribute types gettable/settable via os_channel_attr_{get,set}.
122  *
123  *     g: retrievable at any time
124  *     s: settable at any time
125  *     S: settable once, only at creation time
126  */
127 typedef enum {
128 	CHANNEL_ATTR_TX_RINGS,          /* (g) # of transmit rings */
129 	CHANNEL_ATTR_RX_RINGS,          /* (g) # of receive rings */
130 	CHANNEL_ATTR_TX_SLOTS,          /* (g) # of slots per transmit ring */
131 	CHANNEL_ATTR_RX_SLOTS,          /* (g) # of slots per receive ring */
132 	CHANNEL_ATTR_SLOT_BUF_SIZE,     /* (g) buffer per slot (bytes) */
133 	CHANNEL_ATTR_SLOT_META_SIZE,    /* (g) metadata per slot (bytes) */
134 	CHANNEL_ATTR_EXCLUSIVE,         /* (g/s) bool: exclusive open */
135 	CHANNEL_ATTR_NO_AUTO_SYNC,      /* (g/s) bool: will do explicit sync */
136 	CHANNEL_ATTR_UNUSED_1,          /* unused */
137 	CHANNEL_ATTR_TX_LOWAT_UNIT,     /* (g/s) see channel_threshold_unit_t */
138 	CHANNEL_ATTR_TX_LOWAT_VALUE,    /* (g/s) transmit low-watermark */
139 	CHANNEL_ATTR_RX_LOWAT_UNIT,     /* (g/s) see channel_threshold_unit_t */
140 	CHANNEL_ATTR_RX_LOWAT_VALUE,    /* (g/s) receive low-watermark */
141 	CHANNEL_ATTR_NEXUS_TYPE,        /* (g) nexus type */
142 	CHANNEL_ATTR_NEXUS_EXTENSIONS,  /* (g) nexus extension(s) */
143 	CHANNEL_ATTR_NEXUS_MHINTS,      /* (g) nexus memory usage hints */
144 	CHANNEL_ATTR_TX_HOST_RINGS,     /* (g) # of transmit host rings */
145 	CHANNEL_ATTR_RX_HOST_RINGS,     /* (g) # of receive host rings */
146 	CHANNEL_ATTR_NEXUS_IFINDEX,     /* (g) nexus network interface index */
147 	CHANNEL_ATTR_NEXUS_STATS_SIZE,  /* (g) nexus statistics region size */
148 	CHANNEL_ATTR_NEXUS_FLOWADV_MAX, /* (g) # of flow advisory entries */
149 	CHANNEL_ATTR_NEXUS_META_TYPE,   /* (g) nexus metadata type */
150 	CHANNEL_ATTR_NEXUS_META_SUBTYPE, /* (g) nexus metadata subtype */
151 	CHANNEL_ATTR_NEXUS_CHECKSUM_OFFLOAD, /* (g) nexus checksum offload */
152 	CHANNEL_ATTR_USER_PACKET_POOL,  /* (g/s) bool: use user packet pool */
153 	CHANNEL_ATTR_NEXUS_ADV_SIZE,    /* (g) nexus advisory region size */
154 	CHANNEL_ATTR_NEXUS_DEFUNCT_OK,  /* (g/s) bool: allow defunct */
155 	CHANNEL_ATTR_FILTER,            /* (g/s) bool: filter mode */
156 	CHANNEL_ATTR_EVENT_RING,        /* (g/s) bool: enable event ring */
157 	CHANNEL_ATTR_MAX_FRAGS,         /* (g) max length of buflet chain */
158 	CHANNEL_ATTR_NUM_BUFFERS,       /* (g) # of buffers in user pool */
159 	CHANNEL_ATTR_LOW_LATENCY,       /* (g/s) bool: low latency channel */
160 	CHANNEL_ATTR_LARGE_BUF_SIZE,    /* (g) large buffer size (bytes) */
161 } channel_attr_type_t;
162 
163 /*
164  * Channel nexus metadata type.
165  */
166 typedef enum {
167 	CHANNEL_NEXUS_META_TYPE_INVALID = 0,
168 	CHANNEL_NEXUS_META_TYPE_QUANTUM, /* OK for os_packet quantum APIs */
169 	CHANNEL_NEXUS_META_TYPE_PACKET,  /* OK for all os_packet APIs */
170 } channel_nexus_meta_type_t;
171 
172 /*
173  * Channel nexus metadata subtype.
174  */
175 typedef enum {
176 	CHANNEL_NEXUS_META_SUBTYPE_INVALID = 0,
177 	CHANNEL_NEXUS_META_SUBTYPE_PAYLOAD,
178 	CHANNEL_NEXUS_META_SUBTYPE_RAW,
179 } channel_nexus_meta_subtype_t;
180 
181 /*
182  * Valid values for CHANNEL_ATTR_NEXUS_CHECKSUM_OFFLOAD
183  */
184 #define CHANNEL_NEXUS_CHECKSUM_PARTIAL  0x1     /* partial checksum */
185 
186 /*
187  * Channel statistics ID.
188  */
189 typedef enum {
190 	CHANNEL_STATS_ID_IP = 0,        /* struct ip_stats */
191 	CHANNEL_STATS_ID_IP6,           /* struct ip6_stats */
192 	CHANNEL_STATS_ID_TCP,           /* struct tcp_stats */
193 	CHANNEL_STATS_ID_UDP,           /* struct udp_stats */
194 	CHANNEL_STATS_ID_QUIC,          /* struct quic_stats */
195 } channel_stats_id_t;
196 
/*
 * Slot properties.  Structure is aligned to allow for efficient copy.
 *
 * All fields except for sp_{flags,len} are immutable (I).  The system
 * will verify the immutable fields for correctness during
 * os_channel_put(), and will abort the process if it detects
 * inconsistencies.  This is meant to help with debugging, since it
 * indicates bugs and/or memory corruption.
 */
typedef struct slot_prop {
	uint16_t sp_flags;              /* private flags */
	uint16_t sp_len;                /* length for this slot */
	uint32_t sp_idx;                /* (I) slot index */
	mach_vm_address_t sp_ext_ptr;   /* (I) pointer for indirect buffer */
	mach_vm_address_t sp_buf_ptr;   /* (I) pointer for native buffer */
	mach_vm_address_t sp_mdata_ptr; /* (I) pointer for metadata */
	uint32_t _sp_pad[8];            /* reserved */
} slot_prop_t __attribute__((aligned(sizeof(uint64_t))));
215 
216 #ifndef KERNEL
217 /*
218  * User APIs.
219  */
220 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
221 __BEGIN_DECLS
222 /*
223  * Creates a Channel attribute object.
224  *
225  * This must be paired with a os_channel_attr_destroy() on the handle.
226  */
227 extern channel_attr_t os_channel_attr_create(void);
228 
/*
 * Clones a Channel attribute object.  If the source attribute is NULL,
 * it behaves just like os_channel_attr_create().
 *
 * This must be paired with an os_channel_attr_destroy() on the handle.
 */
235 extern channel_attr_t os_channel_attr_clone(const channel_attr_t);
236 
237 /*
238  * Sets a value for a given attribute type on a Channel attribute object.
239  */
240 extern int os_channel_attr_set(const channel_attr_t attr,
241     const channel_attr_type_t type, const uint64_t value);
242 
243 /*
244  * Sets a key blob on a Channel attribute object.  Existing key blob
245  * information in the attribute object will be removed, if any, and
246  * replaced with the new key blob.  Specifying 0 for key_len will
247  * clear the key stored in the attribute object.  The maximum key
248  * length is specified by NEXUS_MAX_KEY_LEN.
249  */
250 extern int os_channel_attr_set_key(const channel_attr_t attr,
251     const void *key, const uint32_t key_len);
252 
253 /*
254  * Gets a value for a given attribute type on a Channel attribute object.
255  */
256 extern int os_channel_attr_get(const channel_attr_t attr,
257     const channel_attr_type_t type, uint64_t *value);
258 
259 /*
260  * Gets a key blob on a Channel attribute object.  If key is NULL,
261  * returns the length of the key blob with key_len, so caller knows
262  * how much to allocate space for key blob.
263  */
264 extern int os_channel_attr_get_key(const channel_attr_t attr,
265     void *key, uint32_t *key_len);
266 
267 /*
268  * Destroys a Channel attribute object, along with all resources
269  * associated with it (e.g. key blob).
270  */
271 extern void os_channel_attr_destroy(const channel_attr_t attr);
272 
273 /*
274  * Opens a Channel to a Nexus provider instance.  Upon success, maps memory
275  * region and allocates resources.
276  *
277  * This must be paired with a os_channel_destroy() on the handle, in order to
278  * unmap the memory region and free resources.
279  */
280 extern channel_t os_channel_create(const uuid_t uuid, const nexus_port_t port);
281 
282 /*
283  * Extended version of os_channel_create().
284  */
285 extern channel_t os_channel_create_extended(const uuid_t uuid,
286     const nexus_port_t port, const ring_dir_t dir, const ring_id_t rid,
287     const channel_attr_t attr);
288 
289 /*
290  * Retrieves the file descriptor associated with the Channel.
291  */
292 extern int os_channel_get_fd(const channel_t channel);
293 
294 /*
295  * Retrieves current channel attributes into the channel_attr_t handle.
296  */
297 extern int os_channel_read_attr(const channel_t channel, channel_attr_t attr);
298 
299 /*
300  * Updates channel attributes based on those referred to by the channel_attr_t
301  * handle.  See comments above on channel_attr_type_t; this routine will only
302  * update attributes that are marked with 's' but not 'S'.
303  */
304 extern int os_channel_write_attr(const channel_t channel, channel_attr_t attr);
305 
306 /*
307  * Retrieves channel's associated nexus type into *nexus_type, and the
308  * provider-specific extension attribute into *ext.
309  */
310 extern int os_channel_read_nexus_extension_info(const channel_t channel,
311     nexus_type_t *nexus_type, uint64_t *ext);
312 
313 /*
314  * Non-blocking synchronization.  Channel handle may also be used
315  * with kqueue(2), select(2) or poll(2) through the file descriptor.
316  */
317 extern int os_channel_sync(const channel_t channel, const sync_mode_t mode);
318 
319 /*
320  * Destroys a Channel.
321  */
322 extern void os_channel_destroy(const channel_t channel);
323 
324 /*
325  * Checks if a channel is defunct.  Returns non-zero if defunct.
326  */
327 extern int os_channel_is_defunct(const channel_t channel);
328 
329 /*
330  * Data Movement APIs.
331  *
332  * Obtain channel_ring_t handle via os_channel_{tx,rx}_ring().  You will
333  * need to specify the ring_id_t which identifies the ring — this is true
334  * even for a single TX/RX ring case.  The Nexus provider can communicate
335  * to the client the ID of the TX and RX ring that should be used to
336  * communicate to it, through a contract between the two.  For instance,
337  * it can tell the client to use first TX ring and first RX ring, etc.
338  * through some side-channel.  It should not assume 0 or any other number
339  * as ID, however, as the in-kernel Nexus object is the authoritative source
340  * of truth.  This is where the os_channel_ring_id() call comes into the
341  * picture, as it will return the first and last usable TX and RX ring IDs
342  * for the Channel opened to that Nexus object.
343  *
344  * Once the TX or RX ring handle is obtained above, the client can ask for
345  * the first usable slot in the ring through os_channel_get_next_slot()
346  * passing NULL for the 'slot' parameter. This returns a channel_slot_t
347  * handle that represents the slot, along with the properties of that slot
348  * described by the slot_prop_t structure. If no slots are available, this
349  * call returns a NULL handle.  It’s important to note that this
350  * call does NOT advance the ring’s current slot pointer; calling this
351  * multiple times in succession will yield the same result.
352  *
353  * The client proceeds to use the slot by examining the returned
354  * slot_prop_t fields including the pointer to the internal buffer
355  * associated with that slot.  Once the client is finished, it updates
356  * the relevant slot_prop_t fields (e.g. length) and calls
357  * os_channel_set_slot_properties() to apply them to the slot.
358  *
359  * To get the next slot, the client provides the non-NULL slot value obtained
360  * from the previous call to os_channel_get_next_slot() as the 'slot' parameter
361  * in its next invocation of that function.
362  *
363  * To advance the ring’s current pointer, the client invokes
364  * os_channel_advance_slot() specifying the slot to advance past. If the slot
365  * is invalid, this function returns a non-zero value.
366  *
367  * Once the client is ready to commit, call os_channel_sync() in
368  * either/all directions.
369  */
/* Resolve a symbolic ring ID type (first/last TX/RX) to a concrete ring ID. */
extern ring_id_t os_channel_ring_id(const channel_t channel,
    const ring_id_type_t type);
/* Obtain a handle to the TX ring identified by rid. */
extern channel_ring_t os_channel_tx_ring(const channel_t channel,
    const ring_id_t rid);
/* Obtain a handle to the RX ring identified by rid. */
extern channel_ring_t os_channel_rx_ring(const channel_t channel,
    const ring_id_t rid);
/* NOTE(review): presumably reports pending work on the ring — confirm semantics. */
extern int os_channel_pending(const channel_ring_t ring);
377 
/*
 * This returns a nexus-specific timestamp in nanoseconds taken at the
 * last time os_channel_sync() or its equivalent implicit kevent sync
 * was called.
 */
extern uint64_t os_channel_ring_sync_time(const channel_ring_t ring);
384 
385 /*
386  * This returns a nexus-specific timestamp in nanoseconds to indicate
387  * the time of last activity on the opposite end of the ring.
388  * This is only updated when sync or kevent equivalent is called.
389  */
390 extern uint64_t os_channel_ring_notify_time(const channel_ring_t ring);
391 
/*
 * For a TX ring, os_channel_available_slot_count() returns the minimum
 * number of slots available for TX; it is possible that
 * os_channel_get_next_slot() will return more slots than what was
 * returned by an earlier call to os_channel_available_slot_count().
 */
extern uint32_t os_channel_available_slot_count(const channel_ring_t ring);
/* Get first (slot == NULL) or next usable slot; does not advance the ring. */
extern channel_slot_t os_channel_get_next_slot(const channel_ring_t ring,
    const channel_slot_t slot, slot_prop_t *prop);
/* Advance the ring's current pointer past 'slot'; non-zero if slot is invalid. */
extern int os_channel_advance_slot(channel_ring_t ring,
    const channel_slot_t slot);
/* Apply updated slot_prop_t fields (e.g. length) to the given slot. */
extern void os_channel_set_slot_properties(const channel_ring_t ring,
    const channel_slot_t slot, const slot_prop_t *prop);
405 
406 /*
407  * Return the packet handle associated with a given slot of a ring.
408  */
409 extern packet_t os_channel_slot_get_packet(const channel_ring_t ring,
410     const channel_slot_t slot);
411 
412 /*
413  * Each nexus that the channel is connected to determines whether or
414  * not there is a shareable statistics region identified by one of
415  * the channel_stats_id_t values.  This routine returns a pointer to
416  * such a region upon success, or NULL if not supported by the nexus.
417  */
418 extern void *os_channel_get_stats_region(const channel_t channel,
419     const channel_stats_id_t id);
420 
421 /*
422  * Each nexus that the channel is connected to determines whether or
423  * not there is a nexus-wide advisory region.  This routine returns
424  * a pointer to such a region upon success, or NULL if not supported
425  * by the nexus.
426  */
427 extern void *os_channel_get_advisory_region(const channel_t channel);
428 
429 /*
430  * Each nexus that supports flow admission control may be queried to
431  * advise whether or not the channel is willing to admit more packets
432  * for a given flow.  A return value of 0 indicates that the packets
433  * for the flow are admissible.  If ENOBUFS is returned, the flow is
434  * currently suspended, and further attempts to send more packets on
435  * the ring may result in drops.  Any other error values indicate
436  * that either the nexus doesn't support admission control, or the
437  * arguments aren't valid.
438  */
439 extern int os_channel_flow_admissible(const channel_ring_t ring,
440     uuid_t flow_id, const flowadv_idx_t flow_index);
441 
442 extern int os_channel_flow_adv_get_ce_count(const channel_ring_t chrd,
443     uuid_t flow_id, const flowadv_idx_t flow_index, uint32_t *ce_cnt,
444     uint32_t *pkt_cnt);
445 
446 #define AQM_CONGESTION_FEEDBACK 1
447 extern int os_channel_flow_adv_get_feedback(const channel_ring_t chrd,
448     uuid_t flow_id, const flowadv_idx_t flow_index, uint32_t *congestion_cnt,
449     uint32_t *ce_cnt, uint32_t *pkt_cnt);
/*
 * Allocate a packet from the channel's packet pool.
 * Returns 0 on success with the packet handle in the packet arg.
 * Note: os_channel_packet_alloc() & os_channel_packet_free() should be
 * serialized and should not be called from different thread contexts.
 */
extern int
os_channel_packet_alloc(const channel_t chd, packet_t *packet);
458 
/*
 * Allocate a large packet from the channel's packet pool.
 * Returns 0 on success with the packet handle in the packet arg.
 * Note: os_channel_large_packet_alloc() & os_channel_packet_free() should be
 * serialized and should not be called from different thread contexts.
 */
extern int
os_channel_large_packet_alloc(const channel_t chd, packet_t *packet);
467 
/*
 * Free a packet allocated from the channel's packet pool.
 * Returns 0 on success.
 * Note: os_channel_packet_alloc() & os_channel_packet_free() should be
 * serialized and should not be called from different thread contexts.
 */
extern int
os_channel_packet_free(const channel_t chd, packet_t packet);
476 
477 /*
478  * Attach the given packet to a channel slot
479  */
480 extern int
481 os_channel_slot_attach_packet(const channel_ring_t chrd,
482     const channel_slot_t slot, packet_t packet);
483 
484 /*
485  * Detach a given packet from a channel slot
486  */
487 extern int
488 os_channel_slot_detach_packet(const channel_ring_t chrd,
489     const channel_slot_t slot, packet_t packet);
490 
/*
 * Purge packets from the channel's packet pool.
 * This API should be called at regular intervals by the application to
 * purge unused packets from the channel's packet pool.  The recommended
 * interval is 11 seconds.
 * Returns 0 on success.
 * Note: This call should be serialized with os_channel_packet_alloc() &
 * os_channel_packet_free() and should not be called from different
 * thread contexts.
 */
extern int
os_channel_packet_pool_purge(const channel_t chd);
503 
/*
 * Retrieve a handle to the next available event(s) on the channel.
 * os_event_get_next_event() can then be called on the event handle to
 * retrieve the individual events from the handle.
 * Returns 0 on success, ENXIO if the channel is defunct.
 */
extern int
os_channel_get_next_event_handle(const channel_t chd,
    os_channel_event_handle_t *ehandle, os_channel_event_type_t *etype,
    uint32_t *nevents);
514 
515 /*
516  * Free an event retrieved from the channel.
517  * Returns 0 on success, ENXIO if the channel is defunct.
518  */
519 extern int
520 os_channel_event_free(const channel_t chd, os_channel_event_handle_t ehandle);
521 
/*
 * API to retrieve the latest interface advisory report on the channel.
 * Returns 0 on success.  If the return value is EAGAIN, the caller can
 * attempt to retrieve the information again.
 */
extern int
os_channel_get_interface_advisory(const channel_t chd,
    struct ifnet_interface_advisory *advisory);
530 
/*
 * API to configure interface advisory reporting on the channel.
 * Returns 0 on success.
 */
extern int
os_channel_configure_interface_advisory(const channel_t chd, boolean_t enable);
537 
/* Allocate a buflet from the channel; 0 on success with the handle in *bft. */
extern int
os_channel_buflet_alloc(const channel_t chd, buflet_t *bft);

/* Free a buflet previously allocated via os_channel_buflet_alloc(). */
extern int
os_channel_buflet_free(const channel_t chd, buflet_t ubft);

/*
 * Retrieve buffer statistics for the channel's user packet pool (UPP):
 * total buffer count and buffers currently in use.
 * NOTE(review): semantics inferred from parameter names — confirm.
 */
extern int
os_channel_get_upp_buffer_stats(const channel_t chd, uint64_t *buffer_total,
    uint64_t *buffer_inuse);
547 __END_DECLS
548 #endif  /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
549 #else /* KERNEL */
550 /*
551  * Kernel APIs.
552  */
553 
554 /*
555  * Opaque handles.
556  */
557 struct kern_channel;
558 struct __kern_channel_ring;
559 
560 typedef struct kern_channel             *kern_channel_t;
561 typedef struct __kern_channel_ring      *kern_channel_ring_t;
562 typedef struct __slot_desc              *kern_channel_slot_t;
563 
564 /*
565  * Slot properties (deprecated).
566  */
567 struct kern_slot_prop {
568 	uint32_t _sp_pad[16];           /* reserved */
569 } __attribute__((aligned(sizeof(uint64_t))));
570 
/*
 * @struct kern_channel_ring_stat_increment
 * @abstract Structure used to increment the per ring statistic counters.
 * @field kcrsi_slots_transferred  number of slots transferred
 * @field kcrsi_bytes_transferred  number of bytes transferred
 */
struct kern_channel_ring_stat_increment {
	uint32_t        kcrsi_slots_transferred;
	uint32_t        kcrsi_bytes_transferred;
};
581 
582 /*
583  * Data Movement APIs.
584  *
585  * See block comment above for userland data movement APIs for general
586  * concepts.  The main differences here are the kern_channel_notify()
587  * and kern_channel_reclaim() calls that aren't available for userland.
588  * These calls are typically invoked within the TX and RX sync callbacks
589  * implemented by the nexus provider.
590  *
591  * For TX sync, kern_channel_reclaim() is normally called after the
592  * provider has finished reclaiming slots that have been "transmitted".
593  * In this case, this call is simply a way to indicate to the system
594  * that such condition has happened.
595  *
596  * For RX sync, kern_channel_reclaim() must be called at the beginning
597  * of the callback in order to reclaim user-released slots, and to
598  * ensure that subsequent calls to kern_channel_available_slot_count()
599  * or kern_channel_get_next_slot() operates on the most recent state.
600  *
601  * The kern_channel_notify() is used to post notifications to indicate
602  * slot availability; this may result in the kernel event subsystem
603  * posting readable and writable events.
604  */
605 __BEGIN_DECLS
606 extern uint32_t kern_channel_notify(const kern_channel_ring_t, uint32_t flags);
607 extern uint32_t kern_channel_available_slot_count(
608 	const kern_channel_ring_t ring);
609 /*
610  * NOTE: kern_channel_set_slot_properties(), kern_channel_get_next_slot(),
611  * kern_channel_reclaim() and kern_channel_advance_slot() require that the
612  * caller invokes them from within the sync callback context; they will
613  * assert otherwise.
614  */
615 extern void kern_channel_set_slot_properties(const kern_channel_ring_t,
616     const kern_channel_slot_t slot, const struct kern_slot_prop *prop);
617 extern kern_channel_slot_t kern_channel_get_next_slot(
618 	const kern_channel_ring_t kring, const kern_channel_slot_t slot,
619 	struct kern_slot_prop *slot_prop);
620 extern uint32_t kern_channel_reclaim(const kern_channel_ring_t);
621 extern void kern_channel_advance_slot(const kern_channel_ring_t kring,
622     kern_channel_slot_t slot);
623 
624 /*
625  * Packet.
626  */
627 extern kern_packet_t kern_channel_slot_get_packet(
628 	const kern_channel_ring_t ring, const kern_channel_slot_t slot);
629 
630 /*
631  * NOTE: kern_channel_slot_attach_packet(), kern_channel_slot_detach_packet()
632  * and kern_channel_ring_get_container() require that the caller invokes them
633  * from within the sync callback context; they will assert otherwise.
634  */
635 extern errno_t kern_channel_slot_attach_packet(const kern_channel_ring_t ring,
636     const kern_channel_slot_t slot, kern_packet_t packet);
637 extern errno_t kern_channel_slot_detach_packet(const kern_channel_ring_t ring,
638     const kern_channel_slot_t slot, kern_packet_t packet);
639 extern errno_t kern_channel_ring_get_container(const kern_channel_ring_t ring,
640     kern_packet_t **array, uint32_t *count);
641 extern errno_t kern_channel_tx_refill(const kern_channel_ring_t ring,
642     uint32_t pkt_limit, uint32_t byte_limit, boolean_t tx_doorbell_ctxt,
643     boolean_t *pkts_pending);
644 extern errno_t kern_channel_get_service_class(const kern_channel_ring_t ring,
645     kern_packet_svc_class_t *svc);
646 extern errno_t kern_netif_queue_get_service_class(kern_netif_queue_t,
647     kern_packet_svc_class_t *);
648 
649 /*
650  * Misc.
651  */
652 extern void *kern_channel_get_context(const kern_channel_t channel);
653 extern void *kern_channel_ring_get_context(const kern_channel_ring_t ring);
654 extern void *kern_channel_slot_get_context(const kern_channel_ring_t ring,
655     const kern_channel_slot_t slot);
656 
657 /*
658  * NOTE: kern_channel_increment_ring_{net}_stats() requires
659  * that the caller invokes it from within the sync callback context;
660  * it will assert otherwise.
661  */
662 extern void kern_channel_increment_ring_stats(kern_channel_ring_t ring,
663     struct kern_channel_ring_stat_increment *stats);
664 extern void kern_channel_increment_ring_net_stats(kern_channel_ring_t ring,
665     ifnet_t, struct kern_channel_ring_stat_increment *stats);
666 
667 #ifdef BSD_KERNEL_PRIVATE
668 /* forward declare */
669 struct flowadv_fcentry;
670 
671 /* Flow advisory token */
672 typedef uint32_t flowadv_token_t;
673 
674 /*
675  * Private, unexported KPIs.
676  */
677 __private_extern__ errno_t kern_channel_slot_attach_packet_byidx(
678 	const kern_channel_ring_t kring, const uint32_t sidx, kern_packet_t ph);
679 __private_extern__ errno_t kern_channel_slot_detach_packet_byidx(
680 	const kern_channel_ring_t kring, const uint32_t sidx, kern_packet_t ph);
681 __private_extern__ void kern_channel_flowadv_clear(struct flowadv_fcentry *);
682 __private_extern__ void kern_channel_flowadv_set(struct flowadv_fcentry *);
683 __private_extern__ void kern_channel_flowadv_report_congestion_event(
684 	struct flowadv_fcentry *, uint32_t, uint32_t, uint32_t);
685 __private_extern__ void kern_channel_memstatus(struct proc *, uint32_t,
686     struct kern_channel *);
687 __private_extern__ void kern_channel_defunct(struct proc *,
688     struct kern_channel *);
689 __private_extern__ errno_t kern_channel_tx_refill_canblock(
690 	const kern_channel_ring_t, uint32_t, uint32_t, boolean_t, boolean_t *);
691 #endif /* BSD_KERNEL_PRIVATE */
692 __END_DECLS
693 #endif /* KERNEL */
694 #endif /* PRIVATE */
695 #endif /* !_SKYWALK_OS_CHANNEL_H_ */
696