xref: /xnu-8020.121.3/bsd/skywalk/channel/os_channel.h (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_OS_CHANNEL_H_
30 #define _SKYWALK_OS_CHANNEL_H_
31 
32 #ifdef PRIVATE
33 
34 #include <stdint.h>
35 #include <sys/types.h>
36 #include <sys/cdefs.h>
37 #include <uuid/uuid.h>
38 #include <mach/vm_types.h>
39 #include <skywalk/os_nexus.h>
40 #include <skywalk/os_packet.h>
41 #ifndef KERNEL
42 #include <skywalk/os_channel_event.h>
43 #include <net/if_var.h>
44 #endif /* !KERNEL */
45 
46 /*
47  * Indicates that channel supports "CHANNEL_ATTR_NUM_BUFFERS" attribute.
48  * used by Libnetcore.
49  */
50 #define OS_CHANNEL_HAS_NUM_BUFFERS_ATTR 1
51 
/*
 * Flow advisory table index.  FLOWADV_IDX_NONE is the all-ones sentinel
 * meaning "no flow advisory entry"; see os_channel_flow_admissible().
 */
typedef uint32_t flowadv_idx_t;
#define FLOWADV_IDX_NONE                ((flowadv_idx_t)-1)
55 
/*
 * Channel ring direction, selecting which ring(s) a channel is opened
 * with; passed to os_channel_create_extended().
 */
typedef enum {
	CHANNEL_DIR_TX_RX,      /* default: TX and RX ring(s) */
	CHANNEL_DIR_TX,         /* (monitor) only TX ring(s) */
	CHANNEL_DIR_RX          /* (monitor) only RX ring(s) */
} ring_dir_t;
64 
/*
 * Channel ring ID.  CHANNEL_RING_ID_ANY is the wildcard value (no
 * specific ring), usable where an explicit ring ID is not required.
 */
typedef uint32_t ring_id_t;
#define CHANNEL_RING_ID_ANY             ((ring_id_t)-1)

/*
 * Ring ID selector types, used with os_channel_ring_id() to retrieve
 * the first/last usable TX and RX ring IDs for a channel.
 */
typedef enum {
	CHANNEL_FIRST_TX_RING,  /* ID of the first usable TX ring */
	CHANNEL_LAST_TX_RING,   /* ID of the last usable TX ring */
	CHANNEL_FIRST_RX_RING,  /* ID of the first usable RX ring */
	CHANNEL_LAST_RX_RING    /* ID of the last usable RX ring */
} ring_id_type_t;
77 
/* Sync mode values, passed to os_channel_sync() */
typedef enum {
	CHANNEL_SYNC_TX,        /* synchronize TX ring(s) */
	CHANNEL_SYNC_RX,        /* synchronize RX ring(s) */
#if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE)
	CHANNEL_SYNC_UPP        /* synchronize packet pool rings only */
#endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */
} sync_mode_t;
86 
/* Sync flags; the flag values are visible only to Libsyscall and the kernel */
typedef uint32_t sync_flags_t;
#if defined(LIBSYSCALL_INTERFACE) || defined(BSD_KERNEL_PRIVATE)
#define CHANNEL_SYNCF_ALLOC        0x1     /* synchronize alloc ring */
#define CHANNEL_SYNCF_FREE         0x2     /* synchronize free ring */
#define CHANNEL_SYNCF_PURGE        0x4     /* purge user packet pool */
#define CHANNEL_SYNCF_ALLOC_BUF    0x8     /* synchronize buflet alloc ring */
#endif /* LIBSYSCALL_INTERFACE || BSD_KERNEL_PRIVATE */
95 
/*
 * Opaque handles.  The underlying structures are private to the
 * implementation; clients only ever hold pointers to them.
 */
struct channel;
struct channel_ring_desc;
struct __slot_desc;
struct channel_attr;

typedef struct channel                  *channel_t;      /* channel handle */
typedef struct channel_ring_desc        *channel_ring_t; /* ring handle */
typedef struct __slot_desc              *channel_slot_t; /* slot handle */
typedef struct channel_attr             *channel_attr_t; /* attribute handle */
108 
/*
 * Channel monitor types; value of the CHANNEL_ATTR_MONITOR attribute.
 */
typedef enum {
	CHANNEL_MONITOR_OFF,            /* default */
	CHANNEL_MONITOR_NO_COPY,        /* zero-copy (delayed) mode */
	CHANNEL_MONITOR_COPY            /* copy (immediate) mode */
} channel_monitor_type_t;
117 
/*
 * Channel threshold unit types; value of the CHANNEL_ATTR_{TX,RX}_LOWAT_UNIT
 * attributes, giving the unit of the corresponding low-watermark values.
 */
typedef enum {
	CHANNEL_THRESHOLD_UNIT_SLOTS,   /* unit in slots (default) */
	CHANNEL_THRESHOLD_UNIT_BYTES,   /* unit in bytes */
} channel_threshold_unit_t;
125 
/*
 * Channel attribute types gettable/settable via os_channel_attr_{get,set}.
 *
 *     g: retrievable at any time
 *     s: settable at any time
 *     S: settable once, only at creation time
 *
 * NOTE(review): enum values are positional; presumably new attributes must
 * be appended at the end (not inserted) to keep existing values stable for
 * clients — confirm before reordering.
 */
typedef enum {
	CHANNEL_ATTR_TX_RINGS,          /* (g) # of transmit rings */
	CHANNEL_ATTR_RX_RINGS,          /* (g) # of receive rings */
	CHANNEL_ATTR_TX_SLOTS,          /* (g) # of slots per transmit ring */
	CHANNEL_ATTR_RX_SLOTS,          /* (g) # of slots per receive ring */
	CHANNEL_ATTR_SLOT_BUF_SIZE,     /* (g) buffer per slot (bytes) */
	CHANNEL_ATTR_SLOT_META_SIZE,    /* (g) metadata per slot (bytes) */
	CHANNEL_ATTR_EXCLUSIVE,         /* (g/s) bool: exclusive open */
	CHANNEL_ATTR_NO_AUTO_SYNC,      /* (g/s) bool: will do explicit sync */
	CHANNEL_ATTR_MONITOR,           /* (g/s) see channel_monitor_type_t */
	CHANNEL_ATTR_TX_LOWAT_UNIT,     /* (g/s) see channel_threshold_unit_t */
	CHANNEL_ATTR_TX_LOWAT_VALUE,    /* (g/s) transmit low-watermark */
	CHANNEL_ATTR_RX_LOWAT_UNIT,     /* (g/s) see channel_threshold_unit_t */
	CHANNEL_ATTR_RX_LOWAT_VALUE,    /* (g/s) receive low-watermark */
	CHANNEL_ATTR_NEXUS_TYPE,        /* (g) nexus type */
	CHANNEL_ATTR_NEXUS_EXTENSIONS,  /* (g) nexus extension(s) */
	CHANNEL_ATTR_NEXUS_MHINTS,      /* (g) nexus memory usage hints */
	CHANNEL_ATTR_TX_HOST_RINGS,     /* (g) # of transmit host rings */
	CHANNEL_ATTR_RX_HOST_RINGS,     /* (g) # of receive host rings */
	CHANNEL_ATTR_NEXUS_IFINDEX,     /* (g) nexus network interface index */
	CHANNEL_ATTR_NEXUS_STATS_SIZE,  /* (g) nexus statistics region size */
	CHANNEL_ATTR_NEXUS_FLOWADV_MAX, /* (g) # of flow advisory entries */
	CHANNEL_ATTR_NEXUS_META_TYPE,   /* (g) nexus metadata type */
	CHANNEL_ATTR_NEXUS_META_SUBTYPE, /* (g) nexus metadata subtype */
	CHANNEL_ATTR_NEXUS_CHECKSUM_OFFLOAD, /* (g) nexus checksum offload */
	CHANNEL_ATTR_USER_PACKET_POOL,  /* (g/s) bool: use user packet pool */
	CHANNEL_ATTR_NEXUS_ADV_SIZE,    /* (g) nexus advisory region size */
	CHANNEL_ATTR_NEXUS_DEFUNCT_OK,  /* (g/s) bool: allow defunct */
	CHANNEL_ATTR_FILTER,            /* (g/s) bool: filter mode */
	CHANNEL_ATTR_EVENT_RING,        /* (g/s) bool: enable event ring */
	CHANNEL_ATTR_MAX_FRAGS,         /* (g) max length of buflet chain */
	CHANNEL_ATTR_NUM_BUFFERS,       /* (g) # of buffers in user pool */
	CHANNEL_ATTR_LOW_LATENCY,       /* (g/s) bool: low latency channel */
} channel_attr_type_t;
167 
/*
 * Channel nexus metadata type; value of CHANNEL_ATTR_NEXUS_META_TYPE.
 */
typedef enum {
	CHANNEL_NEXUS_META_TYPE_INVALID = 0,
	CHANNEL_NEXUS_META_TYPE_QUANTUM, /* OK for os_packet quantum APIs */
	CHANNEL_NEXUS_META_TYPE_PACKET,  /* OK for all os_packet APIs */
} channel_nexus_meta_type_t;
176 
/*
 * Channel nexus metadata subtype; value of CHANNEL_ATTR_NEXUS_META_SUBTYPE.
 */
typedef enum {
	CHANNEL_NEXUS_META_SUBTYPE_INVALID = 0,
	CHANNEL_NEXUS_META_SUBTYPE_PAYLOAD,
	CHANNEL_NEXUS_META_SUBTYPE_RAW,
} channel_nexus_meta_subtype_t;
185 
186 /*
187  * Valid values for CHANNEL_ATTR_NEXUS_CHECKSUM_OFFLOAD
188  */
189 #define CHANNEL_NEXUS_CHECKSUM_PARTIAL  0x1     /* partial checksum */
190 
/*
 * Channel statistics ID, used with os_channel_get_stats_region() to
 * select which shareable statistics region to retrieve.  Each value's
 * comment names the structure that region contains.
 */
typedef enum {
	CHANNEL_STATS_ID_IP = 0,        /* struct ip_stats */
	CHANNEL_STATS_ID_IP6,           /* struct ip6_stats */
	CHANNEL_STATS_ID_TCP,           /* struct tcp_stats */
	CHANNEL_STATS_ID_UDP,           /* struct udp_stats */
	CHANNEL_STATS_ID_QUIC,          /* struct quic_stats */
} channel_stats_id_t;
201 
/*
 * Slot properties.  Structure is aligned to allow for efficient copy.
 *
 * Fields except for sp_{flags,len} are immutables (I).  The system will
 * verify for correctness during os_channel_put() across the immutable
 * fields, and will abort the process if it detects inconsistencies.
 * This is meant to help with debugging, since it indicates bugs and/or
 * memory corruption.
 */
typedef struct slot_prop {
	uint16_t sp_flags;              /* private flags */
	uint16_t sp_len;                /* length for this slot */
	uint32_t sp_idx;                /* (I) slot index */
	mach_vm_address_t sp_ext_ptr;   /* (I) pointer for indirect buffer */
	mach_vm_address_t sp_buf_ptr;   /* (I) pointer for native buffer */
	mach_vm_address_t sp_mdata_ptr; /* (I) pointer for metadata */
	uint32_t _sp_pad[8];            /* reserved; keeps struct size stable */
} slot_prop_t __attribute__((aligned(sizeof(uint64_t))));
220 
221 #ifndef KERNEL
222 /*
223  * User APIs.
224  */
225 #if !defined(_POSIX_C_SOURCE) || defined(_DARWIN_C_SOURCE)
226 __BEGIN_DECLS
227 /*
228  * Creates a Channel attribute object.
229  *
230  * This must be paired with a os_channel_attr_destroy() on the handle.
231  */
232 extern channel_attr_t os_channel_attr_create(void);
233 
234 /*
235  * Clones a Channel attribute object.  If source attribute is NULL
236  * it behaves just like os_channel_attr_create();
237  *
238  * This must be paired with a os_channel_attr_destroy() on the handle.
239  */
240 extern channel_attr_t os_channel_attr_clone(const channel_attr_t);
241 
242 /*
243  * Sets a value for a given attribute type on a Channel attribute object.
244  */
245 extern int os_channel_attr_set(const channel_attr_t attr,
246     const channel_attr_type_t type, const uint64_t value);
247 
248 /*
249  * Sets a key blob on a Channel attribute object.  Existing key blob
250  * information in the attribute object will be removed, if any, and
251  * replaced with the new key blob.  Specifying 0 for key_len will
252  * clear the key stored in the attribute object.  The maximum key
253  * length is specified by NEXUS_MAX_KEY_LEN.
254  */
255 extern int os_channel_attr_set_key(const channel_attr_t attr,
256     const void *key, const uint32_t key_len);
257 
258 /*
259  * Gets a value for a given attribute type on a Channel attribute object.
260  */
261 extern int os_channel_attr_get(const channel_attr_t attr,
262     const channel_attr_type_t type, uint64_t *value);
263 
264 /*
265  * Gets a key blob on a Channel attribute object.  If key is NULL,
266  * returns the length of the key blob with key_len, so caller knows
267  * how much to allocate space for key blob.
268  */
269 extern int os_channel_attr_get_key(const channel_attr_t attr,
270     void *key, uint32_t *key_len);
271 
272 /*
273  * Destroys a Channel attribute object, along with all resources
274  * associated with it (e.g. key blob).
275  */
276 extern void os_channel_attr_destroy(const channel_attr_t attr);
277 
278 /*
279  * Opens a Channel to a Nexus provider instance.  Upon success, maps memory
280  * region and allocates resources.
281  *
282  * This must be paired with a os_channel_destroy() on the handle, in order to
283  * unmap the memory region and free resources.
284  */
285 extern channel_t os_channel_create(const uuid_t uuid, const nexus_port_t port);
286 
287 /*
288  * Extended version of os_channel_create().
289  */
290 extern channel_t os_channel_create_extended(const uuid_t uuid,
291     const nexus_port_t port, const ring_dir_t dir, const ring_id_t rid,
292     const channel_attr_t attr);
293 
294 /*
295  * Retrieves the file descriptor associated with the Channel.
296  */
297 extern int os_channel_get_fd(const channel_t channel);
298 
299 /*
300  * Retrieves current channel attributes into the channel_attr_t handle.
301  */
302 extern int os_channel_read_attr(const channel_t channel, channel_attr_t attr);
303 
304 /*
305  * Updates channel attributes based on those referred to by the channel_attr_t
306  * handle.  See comments above on channel_attr_type_t; this routine will only
307  * update attributes that are marked with 's' but not 'S'.
308  */
309 extern int os_channel_write_attr(const channel_t channel, channel_attr_t attr);
310 
311 /*
312  * Retrieves channel's associated nexus type into *nexus_type, and the
313  * provider-specific extension attribute into *ext.
314  */
315 extern int os_channel_read_nexus_extension_info(const channel_t channel,
316     nexus_type_t *nexus_type, uint64_t *ext);
317 
318 /*
319  * Non-blocking synchronization.  Channel handle may also be used
320  * with kqueue(2), select(2) or poll(2) through the file descriptor.
321  */
322 extern int os_channel_sync(const channel_t channel, const sync_mode_t mode);
323 
324 /*
325  * Destroys a Channel.
326  */
327 extern void os_channel_destroy(const channel_t channel);
328 
329 /*
330  * Checks if a channel is defunct.  Returns non-zero if defunct.
331  */
332 extern int os_channel_is_defunct(const channel_t channel);
333 
334 /*
335  * Data Movement APIs.
336  *
337  * Obtain channel_ring_t handle via os_channel_{tx,rx}_ring().  You will
338  * need to specify the ring_id_t which identifies the ring — this is true
339  * even for a single TX/RX ring case.  The Nexus provider can communicate
340  * to the client the ID of the TX and RX ring that should be used to
341  * communicate to it, through a contract between the two.  For instance,
342  * it can tell the client to use first TX ring and first RX ring, etc.
343  * through some side-channel.  It should not assume 0 or any other number
344  * as ID, however, as the in-kernel Nexus object is the authoritative source
345  * of truth.  This is where the os_channel_ring_id() call comes into the
346  * picture, as it will return the first and last usable TX and RX ring IDs
347  * for the Channel opened to that Nexus object.
348  *
349  * Once the TX or RX ring handle is obtained above, the client can ask for
350  * the first usable slot in the ring through os_channel_get_next_slot()
351  * passing NULL for the 'slot' parameter. This returns a channel_slot_t
352  * handle that represents the slot, along with the properties of that slot
353  * described by the slot_prop_t structure. If no slots are available, this
354  * call returns a NULL handle.  It’s important to note that this
355  * call does NOT advance the ring’s current slot pointer; calling this
356  * multiple times in succession will yield the same result.
357  *
358  * The client proceeds to use the slot by examining the returned
359  * slot_prop_t fields including the pointer to the internal buffer
360  * associated with that slot.  Once the client is finished, it updates
361  * the relevant slot_prop_t fields (e.g. length) and calls
362  * os_channel_set_slot_properties() to apply them to the slot.
363  *
364  * To get the next slot, the client provides the non-NULL slot value obtained
365  * from the previous call to os_channel_get_next_slot() as the 'slot' parameter
366  * in its next invocation of that function.
367  *
368  * To advance the ring’s current pointer, the client invokes
369  * os_channel_advance_slot() specifying the slot to advance past. If the slot
370  * is invalid, this function returns a non-zero value.
371  *
372  * Once the client is ready to commit, call os_channel_sync() in
373  * either/all directions.
374  */
375 extern ring_id_t os_channel_ring_id(const channel_t channel,
376     const ring_id_type_t type);
377 extern channel_ring_t os_channel_tx_ring(const channel_t channel,
378     const ring_id_t rid);
379 extern channel_ring_t os_channel_rx_ring(const channel_t channel,
380     const ring_id_t rid);
381 extern int os_channel_pending(const channel_ring_t ring);
382 
/*
 * This returns a nexus-specific timestamp in nanoseconds taken at the
 * last time os_channel_sync() or its equivalent implicit kevent sync
 * was called.
 */
388 extern uint64_t os_channel_ring_sync_time(const channel_ring_t ring);
389 
390 /*
391  * This returns a nexus-specific timestamp in nanoseconds to indicate
392  * the time of last activity on the opposite end of the ring.
393  * This is only updated when sync or kevent equivalent is called.
394  */
395 extern uint64_t os_channel_ring_notify_time(const channel_ring_t ring);
396 
/*
 * For TX ring os_channel_available_slot_count() returns the minimum number
 * of slots available for TX, and it is possible that
 * os_channel_get_next_slot() will return more slots than what was
 * returned by an earlier call to os_channel_available_slot_count().
 */
403 extern uint32_t os_channel_available_slot_count(const channel_ring_t ring);
404 extern channel_slot_t os_channel_get_next_slot(const channel_ring_t ring,
405     const channel_slot_t slot, slot_prop_t *prop);
406 extern int os_channel_advance_slot(channel_ring_t ring,
407     const channel_slot_t slot);
408 extern void os_channel_set_slot_properties(const channel_ring_t ring,
409     const channel_slot_t slot, const slot_prop_t *prop);
410 
411 /*
412  * Return the packet handle associated with a given slot of a ring.
413  */
414 extern packet_t os_channel_slot_get_packet(const channel_ring_t ring,
415     const channel_slot_t slot);
416 
417 /*
418  * Each nexus that the channel is connected to determines whether or
419  * not there is a shareable statistics region identified by one of
420  * the channel_stats_id_t values.  This routine returns a pointer to
421  * such a region upon success, or NULL if not supported by the nexus.
422  */
423 extern void *os_channel_get_stats_region(const channel_t channel,
424     const channel_stats_id_t id);
425 
426 /*
427  * Each nexus that the channel is connected to determines whether or
428  * not there is a nexus-wide advisory region.  This routine returns
429  * a pointer to such a region upon success, or NULL if not supported
430  * by the nexus.
431  */
432 extern void *os_channel_get_advisory_region(const channel_t channel);
433 
434 /*
435  * Each nexus that supports flow admission control may be queried to
436  * advise whether or not the channel is willing to admit more packets
437  * for a given flow.  A return value of 0 indicates that the packets
438  * for the flow are admissible.  If ENOBUFS is returned, the flow is
439  * currently suspended, and further attempts to send more packets on
440  * the ring may result in drops.  Any other error values indicate
441  * that either the nexus doesn't support admission control, or the
442  * arguments aren't valid.
443  */
444 extern int os_channel_flow_admissible(const channel_ring_t ring,
445     uuid_t flow_id, const flowadv_idx_t flow_index);
446 
447 /*
448  * Allocate a packet from the channel's packet pool.
449  * Returns 0 on success with the packet handle in packet arg.
450  * Note: os_channel_packet_alloc() & os_channel_packet_free() should be
451  * serialized and should not be called from the different thread context.
452  */
453 extern int
454 os_channel_packet_alloc(const channel_t chd, packet_t *packet);
455 
456 /*
457  * Free a packet allocated from the channel's packet pool.
458  * Returns 0 on success
459  * Note: os_channel_packet_alloc() & os_channel_packet_free() should be
460  * serialized and should not be called from the different thread context.
461  */
462 extern int
463 os_channel_packet_free(const channel_t chd, packet_t packet);
464 
465 /*
466  * Attach the given packet to a channel slot
467  */
468 extern int
469 os_channel_slot_attach_packet(const channel_ring_t chrd,
470     const channel_slot_t slot, packet_t packet);
471 
472 /*
473  * Detach a given packet from a channel slot
474  */
475 extern int
476 os_channel_slot_detach_packet(const channel_ring_t chrd,
477     const channel_slot_t slot, packet_t packet);
478 
479 /*
480  * purge packets from the channel's packet pool.
481  * This API should be called at regular intervals by application to purge
482  * unused packets from the channel's packet pool. Recommended interval is
483  * 11 seconds.
484  * Returns 0 on success.
485  * Note: This call should be serialized with os_channel_packet_alloc() &
486  * os_channel_packet_free() and should not be called from different
487  * thread context.
488  */
489 extern int
490 os_channel_packet_pool_purge(const channel_t chd);
491 
/*
 * Retrieve handle to the next available event(s) on the channel.
 * os_event_get_next_event() can then be called on the event handle to
 * retrieve the individual events from the handle.
 * Returns 0 on success, ENXIO if the channel is defunct.
 */
498 extern int
499 os_channel_get_next_event_handle(const channel_t chd,
500     os_channel_event_handle_t *ehandle, os_channel_event_type_t *etype,
501     uint32_t *nevents);
502 
503 /*
504  * Free an event retrieved from the channel.
505  * Returns 0 on success, ENXIO if the channel is defunct.
506  */
507 extern int
508 os_channel_event_free(const channel_t chd, os_channel_event_handle_t ehandle);
509 
/*
 * API to retrieve the latest interface advisory report on the channel.
 * Returns 0 on success. If the return value is EAGAIN, caller can attempt
 * to retrieve the information again.
 */
515 extern int
516 os_channel_get_interface_advisory(const channel_t chd,
517     struct ifnet_interface_advisory *advisory);
518 
/*
 * API to configure interface advisory report on the channel.
 * Returns 0 on success.
 */
523 extern int
524 os_channel_configure_interface_advisory(const channel_t chd, boolean_t enable);
525 
526 extern int
527 os_channel_buflet_alloc(const channel_t chd, buflet_t *bft);
528 
529 extern int
530 os_channel_buflet_free(const channel_t chd, buflet_t ubft);
531 __END_DECLS
532 #endif  /* (!_POSIX_C_SOURCE || _DARWIN_C_SOURCE) */
533 #else /* KERNEL */
534 /*
535  * Kernel APIs.
536  */
537 
/*
 * Opaque handles (kernel side).  The underlying structures are private;
 * kernel clients only ever hold pointers to them.  Note that slot handles
 * share the userland __slot_desc structure.
 */
struct kern_channel;
struct __kern_channel_ring;

typedef struct kern_channel             *kern_channel_t;
typedef struct __kern_channel_ring      *kern_channel_ring_t;
typedef struct __slot_desc              *kern_channel_slot_t;
547 
/*
 * Slot properties (deprecated).  Contents are entirely reserved; the type
 * is retained only so existing callers of kern_channel_get_next_slot() and
 * kern_channel_set_slot_properties() keep compiling.
 */
struct kern_slot_prop {
	uint32_t _sp_pad[16];           /* reserved */
} __attribute__((aligned(sizeof(uint64_t))));
554 
/*
 * @struct kern_channel_ring_stat_increment
 * @abstract Structure used to increment the per ring statistic counters.
 * @field kcrsi_slots_transferred  number of slots transferred
 * @field kcrsi_bytes_transferred  number of bytes transferred
 */
struct kern_channel_ring_stat_increment {
	uint32_t        kcrsi_slots_transferred;
	uint32_t        kcrsi_bytes_transferred;
};
565 
566 /*
567  * Data Movement APIs.
568  *
569  * See block comment above for userland data movement APIs for general
570  * concepts.  The main differences here are the kern_channel_notify()
571  * and kern_channel_reclaim() calls that aren't available for userland.
572  * These calls are typically invoked within the TX and RX sync callbacks
573  * implemented by the nexus provider.
574  *
575  * For TX sync, kern_channel_reclaim() is normally called after the
576  * provider has finished reclaiming slots that have been "transmitted".
577  * In this case, this call is simply a way to indicate to the system
578  * that such condition has happened.
579  *
580  * For RX sync, kern_channel_reclaim() must be called at the beginning
581  * of the callback in order to reclaim user-released slots, and to
582  * ensure that subsequent calls to kern_channel_available_slot_count()
583  * or kern_channel_get_next_slot() operates on the most recent state.
584  *
585  * The kern_channel_notify() is used to post notifications to indicate
586  * slot availability; this may result in the kernel event subsystem
587  * posting readable and writable events.
588  */
589 __BEGIN_DECLS
590 extern uint32_t kern_channel_notify(const kern_channel_ring_t, uint32_t flags);
591 extern uint32_t kern_channel_available_slot_count(
592 	const kern_channel_ring_t ring);
593 /*
594  * NOTE: kern_channel_set_slot_properties(), kern_channel_get_next_slot(),
595  * kern_channel_reclaim() and kern_channel_advance_slot() require that the
596  * caller invokes them from within the sync callback context; they will
597  * assert otherwise.
598  */
599 extern void kern_channel_set_slot_properties(const kern_channel_ring_t,
600     const kern_channel_slot_t slot, const struct kern_slot_prop *prop);
601 extern kern_channel_slot_t kern_channel_get_next_slot(
602 	const kern_channel_ring_t kring, const kern_channel_slot_t slot,
603 	struct kern_slot_prop *slot_prop);
604 extern uint32_t kern_channel_reclaim(const kern_channel_ring_t);
605 extern void kern_channel_advance_slot(const kern_channel_ring_t kring,
606     kern_channel_slot_t slot);
607 
608 /*
609  * Packet.
610  */
611 extern kern_packet_t kern_channel_slot_get_packet(
612 	const kern_channel_ring_t ring, const kern_channel_slot_t slot);
613 
614 /*
615  * NOTE: kern_channel_slot_attach_packet(), kern_channel_slot_detach_packet()
616  * and kern_channel_ring_get_container() require that the caller invokes them
617  * from within the sync callback context; they will assert otherwise.
618  */
619 extern errno_t kern_channel_slot_attach_packet(const kern_channel_ring_t ring,
620     const kern_channel_slot_t slot, kern_packet_t packet);
621 extern errno_t kern_channel_slot_detach_packet(const kern_channel_ring_t ring,
622     const kern_channel_slot_t slot, kern_packet_t packet);
623 extern errno_t kern_channel_ring_get_container(const kern_channel_ring_t ring,
624     kern_packet_t **array, uint32_t *count);
625 extern errno_t kern_channel_tx_refill(const kern_channel_ring_t ring,
626     uint32_t pkt_limit, uint32_t byte_limit, boolean_t tx_doorbell_ctxt,
627     boolean_t *pkts_pending);
628 extern errno_t kern_channel_get_service_class(const kern_channel_ring_t ring,
629     kern_packet_svc_class_t *svc);
630 extern errno_t kern_netif_queue_get_service_class(kern_netif_queue_t,
631     kern_packet_svc_class_t *);
632 
633 /*
634  * Misc.
635  */
636 extern void *kern_channel_get_context(const kern_channel_t channel);
637 extern void *kern_channel_ring_get_context(const kern_channel_ring_t ring);
638 extern void *kern_channel_slot_get_context(const kern_channel_ring_t ring,
639     const kern_channel_slot_t slot);
640 
641 /*
642  * NOTE: kern_channel_increment_ring_{net}_stats() requires
643  * that the caller invokes it from within the sync callback context;
644  * it will assert otherwise.
645  */
646 extern void kern_channel_increment_ring_stats(kern_channel_ring_t ring,
647     struct kern_channel_ring_stat_increment *stats);
648 extern void kern_channel_increment_ring_net_stats(kern_channel_ring_t ring,
649     ifnet_t, struct kern_channel_ring_stat_increment *stats);
650 
651 #ifdef BSD_KERNEL_PRIVATE
652 /* forward declare */
653 struct flowadv_fcentry;
654 
655 /* Flow advisory token */
656 typedef uint32_t flowadv_token_t;
657 
658 /*
659  * Private, unexported KPIs.
660  */
661 __private_extern__ errno_t kern_channel_slot_attach_packet_byidx(
662 	const kern_channel_ring_t kring, const uint32_t sidx, kern_packet_t ph);
663 __private_extern__ errno_t kern_channel_slot_detach_packet_byidx(
664 	const kern_channel_ring_t kring, const uint32_t sidx, kern_packet_t ph);
665 __private_extern__ void kern_channel_flowadv_clear(struct flowadv_fcentry *);
666 __private_extern__ void kern_channel_memstatus(struct proc *, uint32_t,
667     struct kern_channel *);
668 __private_extern__ void kern_channel_defunct(struct proc *,
669     struct kern_channel *);
670 __private_extern__ errno_t kern_channel_tx_refill_canblock(
671 	const kern_channel_ring_t, uint32_t, uint32_t, boolean_t, boolean_t *);
672 #endif /* BSD_KERNEL_PRIVATE */
673 __END_DECLS
674 #endif /* KERNEL */
675 #endif /* PRIVATE */
676 #endif /* !_SKYWALK_OS_CHANNEL_H_ */
677