/*
 * Copyright (c) 2016-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _SKYWALK_PACKET_PBUFPOOLVAR_H_
#define _SKYWALK_PACKET_PBUFPOOLVAR_H_

#ifdef BSD_KERNEL_PRIVATE
#include <skywalk/core/skywalk_var.h>
#include <net/droptap.h>

struct __kern_quantum;
struct __kern_packet;

/*
 * User packet pool hash bucket. Packets allocated by user space are
 * kept in the hash table. This allows the kernel to validate whether
 * or not a given packet object is valid or is already-freed, and thus
 * take the appropriate measure during internalize.
 */
struct kern_pbufpool_u_bkt {
	SLIST_HEAD(, __kern_quantum) upp_head;
};

/* User packet pool hash bucket for buflets (see pp_u_bft_hash_table). */
struct kern_pbufpool_u_bft_bkt {
	SLIST_HEAD(, __kern_buflet_ext) upp_head;
};

/* Up to two buffer regions per pool: default-sized and large buffers. */
#define PBUFPOOL_MAX_BUF_REGIONS        2
#define PBUFPOOL_BUF_IDX_DEF            0
#define PBUFPOOL_BUF_IDX_LARGE          1

/*
 * Packet buffer pool: owns the caches and backing memory regions for
 * packet metadata, buflets and buffers, plus the user packet pool hash
 * tables used to track objects handed out to user space.
 */
struct kern_pbufpool {
	decl_lck_mtx_data(, pp_lock);
	uint32_t                pp_refcnt;
	uint32_t                pp_flags;       /* PPF_* flags below */
	uint32_t                pp_buf_obj_size[PBUFPOOL_MAX_BUF_REGIONS];
	uint32_t                pp_buf_size[PBUFPOOL_MAX_BUF_REGIONS];
	uint16_t                pp_max_frags;

	/*
	 * Caches
	 */
	struct skmem_cache      *pp_buf_cache[PBUFPOOL_MAX_BUF_REGIONS];
	struct skmem_cache      *pp_kmd_cache;
	struct skmem_cache      *pp_kbft_cache[PBUFPOOL_MAX_BUF_REGIONS];

	/*
	 * Regions
	 */
	struct skmem_region     *pp_buf_region[PBUFPOOL_MAX_BUF_REGIONS];
	struct skmem_region     *pp_kmd_region;
	struct skmem_region     *pp_umd_region;
	struct skmem_region     *pp_ubft_region;
	struct skmem_region     *pp_kbft_region;

	/*
	 * User packet pool: packet metadata hash table
	 */
	struct kern_pbufpool_u_bkt *__counted_by(pp_u_hash_table_size) pp_u_hash_table;
	uint64_t                pp_u_bufinuse;

	/*
	 * User packet pool: buflet hash table
	 */
	struct kern_pbufpool_u_bft_bkt *__counted_by(pp_u_bft_hash_table_size) pp_u_bft_hash_table;
	uint64_t                pp_u_bftinuse;

	void                    *pp_ctx;
	pbuf_ctx_retain_fn_t    pp_ctx_retain;
	pbuf_ctx_release_fn_t   pp_ctx_release;
	nexus_meta_type_t       pp_md_type;
	nexus_meta_subtype_t    pp_md_subtype;
	uint32_t                pp_midx_start;
	uint32_t                pp_bidx_start;
	pbufpool_name_t         pp_name;
	pbuf_seg_ctor_fn_t      pp_pbuf_seg_ctor;
	pbuf_seg_dtor_fn_t      pp_pbuf_seg_dtor;

	uint32_t                pp_u_hash_table_size;
	uint32_t                pp_u_bft_hash_table_size;
};

/* valid values for pp_flags */
#define PPF_EXTERNAL            0x1     /* externally configured */
#define PPF_CLOSED              0x2     /* closed; awaiting final destruction */
#define PPF_MONOLITHIC          0x4     /* non slab-based buffer region */
/* buflet is truncated and may not contain the full payload */
#define PPF_TRUNCATED_BUF       0x8
#define PPF_KERNEL              0x10    /* kernel only, no user region(s) */
#define PPF_BUFFER_ON_DEMAND    0x20    /* attach buffers to packet on demand */
#define PPF_BATCH               0x40    /* capable of batch alloc/free */
#define PPF_DYNAMIC             0x80    /* capable of magazine resizing */
#define PPF_LARGE_BUF           0x100   /* configured with large buffers */

#define PP_KERNEL_ONLY(_pp)             \
	(((_pp)->pp_flags & PPF_KERNEL) != 0)

#define PP_HAS_TRUNCATED_BUF(_pp)       \
	(((_pp)->pp_flags & PPF_TRUNCATED_BUF) != 0)

#define PP_HAS_BUFFER_ON_DEMAND(_pp)    \
	(((_pp)->pp_flags & PPF_BUFFER_ON_DEMAND) != 0)

#define PP_BATCH_CAPABLE(_pp)           \
	(((_pp)->pp_flags & PPF_BATCH) != 0)

#define PP_DYNAMIC(_pp)                 \
	(((_pp)->pp_flags & PPF_DYNAMIC) != 0)

#define PP_HAS_LARGE_BUF(_pp)           \
	(((_pp)->pp_flags & PPF_LARGE_BUF) != 0)

/*
 * Parenthesize the macro argument (it may be an arbitrary expression),
 * consistent with the other accessor macros in this file.
 */
#define PP_LOCK(_pp)                    \
	lck_mtx_lock(&(_pp)->pp_lock)
#define PP_LOCK_ASSERT_HELD(_pp)        \
	LCK_MTX_ASSERT(&(_pp)->pp_lock, LCK_MTX_ASSERT_OWNED)
#define PP_LOCK_ASSERT_NOTHELD(_pp)     \
	LCK_MTX_ASSERT(&(_pp)->pp_lock, LCK_MTX_ASSERT_NOTOWNED)
#define PP_UNLOCK(_pp)                  \
	lck_mtx_unlock(&(_pp)->pp_lock)

#define PP_BUF_SIZE_DEF(_pp)    ((_pp)->pp_buf_size[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_SIZE_LARGE(_pp)  ((_pp)->pp_buf_size[PBUFPOOL_BUF_IDX_LARGE])

#define PP_BUF_OBJ_SIZE_DEF(_pp)        \
	((_pp)->pp_buf_obj_size[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_OBJ_SIZE_LARGE(_pp)      \
	((_pp)->pp_buf_obj_size[PBUFPOOL_BUF_IDX_LARGE])

#define PP_BUF_REGION_DEF(_pp)   ((_pp)->pp_buf_region[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_REGION_LARGE(_pp) ((_pp)->pp_buf_region[PBUFPOOL_BUF_IDX_LARGE])

#define PP_BUF_CACHE_DEF(_pp)    ((_pp)->pp_buf_cache[PBUFPOOL_BUF_IDX_DEF])
#define PP_BUF_CACHE_LARGE(_pp)  ((_pp)->pp_buf_cache[PBUFPOOL_BUF_IDX_LARGE])

#define PP_KBFT_CACHE_DEF(_pp)   ((_pp)->pp_kbft_cache[PBUFPOOL_BUF_IDX_DEF])
#define PP_KBFT_CACHE_LARGE(_pp) ((_pp)->pp_kbft_cache[PBUFPOOL_BUF_IDX_LARGE])

__BEGIN_DECLS
extern int pp_init(void);
extern void pp_fini(void);
extern void pp_close(struct kern_pbufpool *);

/* create flags for pp_create() */
#define PPCREATEF_EXTERNAL      0x1     /* externally requested */
#define PPCREATEF_KERNEL_ONLY   0x2     /* kernel-only */
#define PPCREATEF_TRUNCATED_BUF 0x4     /* compat-only (buf is short) */
#define PPCREATEF_ONDEMAND_BUF  0x8     /* buf alloc/free is decoupled */
#define PPCREATEF_DYNAMIC       0x10    /* dynamic per-CPU magazines */

extern struct kern_pbufpool *pp_create(const char *name,
    struct skmem_region_params srp_array[SKMEM_REGIONS], pbuf_seg_ctor_fn_t buf_seg_ctor,
    pbuf_seg_dtor_fn_t buf_seg_dtor, const void *ctx,
    pbuf_ctx_retain_fn_t ctx_retain, pbuf_ctx_release_fn_t ctx_release,
    uint32_t ppcreatef);
extern void pp_destroy(struct kern_pbufpool *);

extern int pp_init_upp(struct kern_pbufpool *, boolean_t);
extern void pp_insert_upp(struct kern_pbufpool *, struct __kern_quantum *,
    pid_t);
extern void pp_insert_upp_locked(struct kern_pbufpool *,
    struct __kern_quantum *, pid_t);
extern void pp_insert_upp_batch(struct kern_pbufpool *pp, pid_t pid,
    uint64_t *__counted_by(num) array, uint32_t num);
extern struct __kern_quantum *pp_remove_upp(struct kern_pbufpool *, obj_idx_t,
    int *);
extern struct __kern_quantum *pp_remove_upp_locked(struct kern_pbufpool *,
    obj_idx_t, int *);
extern struct __kern_quantum *pp_find_upp(struct kern_pbufpool *, obj_idx_t);
extern void pp_purge_upp(struct kern_pbufpool *, pid_t);
extern struct __kern_buflet *pp_remove_upp_bft(struct kern_pbufpool *,
    obj_idx_t, int *);
extern void pp_insert_upp_bft(struct kern_pbufpool *, struct __kern_buflet *,
    pid_t);
extern boolean_t pp_isempty_upp(struct kern_pbufpool *);

extern void pp_retain_locked(struct kern_pbufpool *);
extern void pp_retain(struct kern_pbufpool *);
extern boolean_t pp_release_locked(struct kern_pbufpool *);
extern boolean_t pp_release(struct kern_pbufpool *);

/* flags for pp_regions_params_adjust() */
/* configure packet pool regions for RX only */
#define PP_REGION_CONFIG_BUF_IODIR_IN           0x00000001
/* configure packet pool regions for TX only */
#define PP_REGION_CONFIG_BUF_IODIR_OUT          0x00000002
/* configure packet pool regions for bidirectional operation */
#define PP_REGION_CONFIG_BUF_IODIR_BIDIR        \
	(PP_REGION_CONFIG_BUF_IODIR_IN | PP_REGION_CONFIG_BUF_IODIR_OUT)
/* configure packet pool metadata regions as persistent (wired) */
#define PP_REGION_CONFIG_MD_PERSISTENT          0x00000004
/* configure packet pool buffer regions as persistent (wired) */
#define PP_REGION_CONFIG_BUF_PERSISTENT         0x00000008
/* Enable magazine layer (per-cpu caches) for packet pool metadata regions */
#define PP_REGION_CONFIG_MD_MAGAZINE_ENABLE     0x00000010
/* configure packet pool regions required for kernel-only operations */
#define PP_REGION_CONFIG_KERNEL_ONLY            0x00000020
/* configure packet pool buflet regions */
#define PP_REGION_CONFIG_BUFLET                 0x00000040
/* configure packet pool buffer region as user read-only */
#define PP_REGION_CONFIG_BUF_UREADONLY          0x00000080
/* configure packet pool buffer region as kernel read-only */
#define PP_REGION_CONFIG_BUF_KREADONLY          0x00000100
/* configure packet pool buffer region as a single segment */
#define PP_REGION_CONFIG_BUF_MONOLITHIC         0x00000200
/* configure packet pool buffer region as physically contiguous segment */
#define PP_REGION_CONFIG_BUF_SEGPHYSCONTIG      0x00000400
/* configure packet pool buffer region as cache-inhibiting */
#define PP_REGION_CONFIG_BUF_NOCACHE            0x00000800
/* configure packet pool buffer region (backing IOMD) as thread safe */
/*
 * NOTE(review): bit 0x00001000 is unassigned in this sequence — presumably
 * reserved or retired; confirm before assigning it to a new flag.
 */
#define PP_REGION_CONFIG_BUF_THREADSAFE         0x00002000

extern void pp_regions_params_adjust(struct skmem_region_params srp_array[SKMEM_REGIONS],
    nexus_meta_type_t, nexus_meta_subtype_t, uint32_t, uint16_t, uint32_t,
    uint32_t, uint32_t, uint32_t, uint32_t);

extern uint64_t pp_alloc_packet(struct kern_pbufpool *, uint16_t, uint32_t);
extern uint64_t pp_alloc_packet_by_size(struct kern_pbufpool *, uint32_t,
    uint32_t);
extern int pp_alloc_packet_batch(struct kern_pbufpool *, uint16_t,
    uint64_t *__counted_by(*size), uint32_t *size, boolean_t, alloc_cb_func_t,
    const void *, uint32_t);
extern int pp_alloc_pktq(struct kern_pbufpool *, uint16_t, struct pktq *,
    uint32_t, alloc_cb_func_t, const void *, uint32_t);
extern void pp_free_packet(struct kern_pbufpool *, uint64_t);
extern void pp_free_packet_batch(struct kern_pbufpool *, uint64_t *__counted_by(size) array, uint32_t size);
extern void pp_free_packet_single(struct __kern_packet *);
extern void pp_drop_packet_single(struct __kern_packet *, struct ifnet *, uint16_t,
    drop_reason_t, const char *, uint16_t);
extern void pp_free_packet_chain(struct __kern_packet *, int *);
extern void pp_free_pktq(struct pktq *);
extern void pp_drop_pktq(struct pktq *, struct ifnet *, uint16_t,
    drop_reason_t, const char *, uint16_t);
extern errno_t pp_alloc_buffer(const kern_pbufpool_t, mach_vm_address_t *,
    kern_segment_t *, kern_obj_idx_seg_t *, uint32_t);
extern void pp_free_buffer(const kern_pbufpool_t, mach_vm_address_t);
extern errno_t pp_alloc_buflet(struct kern_pbufpool *pp, kern_buflet_t *kbft,
    uint32_t skmflag, bool large);
extern errno_t pp_alloc_buflet_batch(struct kern_pbufpool *pp,
    uint64_t *__counted_by(*size) array, uint32_t *size, uint32_t skmflag,
    bool large);
extern void pp_free_buflet(const kern_pbufpool_t, kern_buflet_t);
extern void pp_reap_caches(boolean_t);
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* !_SKYWALK_PACKET_PBUFPOOLVAR_H_ */