/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _SKYWALK_MEM_SKMEMREGIONVAR_H
#define _SKYWALK_MEM_SKMEMREGIONVAR_H

#ifdef BSD_KERNEL_PRIVATE
#include <skywalk/core/skywalk_var.h>
#include <skywalk/os_nexus_private.h>

/*
 * Segment types.
 *
 * Records which container a segment currently lives in; see the
 * sksegment structure below.
 */
typedef enum {
	SKSEG_TYPE_INVALID = 0,
	SKSEG_TYPE_ALLOC,		/* segment is in skr_hash_table */
	SKSEG_TYPE_FREE,		/* segment is in skr_seg_free */
	SKSEG_TYPE_DESTROYED		/* in process of being destroyed */
} sksegment_type_t;

/*
 * Segment memory states.
 */
typedef enum {
	SKSEG_STATE_INVALID = 0,
	SKSEG_STATE_DETACHED,		/* not backed by an IOBMD */
	SKSEG_STATE_MAPPED,		/* mapped (IOBMD non-volatile) */
	SKSEG_STATE_MAPPED_WIRED,	/* mapped (IOBMD non-volatile+wired) */
} sksegment_state_t;

struct skmem_region;

/*
 * Segment.
 *
 * Segments that are available for use can be found in the doubly-linked
 * list (skr_seg_free) as well as the red-black tree (skr_seg_tfree).
 * The latter is used to facilitate finding a segment by its index, which
 * is required when allocating a segment from a mirrored region.
 *
 * Allocated segments are inserted into the allocated-address hash chain;
 * they don't exist in any tree at that point.
 */
struct sksegment {
	TAILQ_ENTRY(sksegment) sg_link;	/* sksegment linkage */
	RB_ENTRY(sksegment) sg_node;	/* sksegment node in tree */
	struct skmem_region *sg_region;	/* controlling region */

	/*
	 * If attached to an IOBMD, sg_{start,end} will be valid.
	 */
	IOSKMemoryBufferRef sg_md;	/* backing IOBMD */
	mach_vm_address_t sg_start;	/* start address (inclusive) */
	mach_vm_address_t sg_end;	/* end address (exclusive) */

	uint32_t sg_index;		/* index in skr_seg[] */
	sksegment_type_t sg_type;	/* segment type */
	sksegment_state_t sg_state;	/* segment state */
};

/*
 * True when the segment is linked on a free list.  NOTE(review): this
 * assumes the TAILQ linkage fields are reset to NULL upon removal —
 * confirm against the region implementation.
 */
#define SKSEGMENT_IN_FREELIST(_sg)				\
	((_sg)->sg_link.tqe_next != NULL ||			\
	(_sg)->sg_link.tqe_prev != NULL)

/*
 * Segment hash bucket.
 */
struct sksegment_bkt {
	TAILQ_HEAD(, sksegment) sgb_head;	/* sksegment allocated list */
};

/*
 * Region IDs.
 *
 * When adding or removing regions, adjust the templates in skmem.c
 * accordingly.  Do not reorder regions without making the appropriate
 * changes in the code that relies on the existing arena layout.
 */
typedef enum {
	/*
	 * The following are user task mappable.
	 *
	 * XXX: When adding new ones, ensure that they get added before
	 * SKMEM_REGION_GUARD_TAIL, and make the appropriate changes in
	 * skmem_region_init().
	 */
	SKMEM_REGION_GUARD_HEAD = 0,	/* leading guard page(s) */
	SKMEM_REGION_SCHEMA,		/* channel layout */
	SKMEM_REGION_RING,		/* rings */
	SKMEM_REGION_BUF,		/* rx/tx buffer */
	SKMEM_REGION_RXBUF,		/* rx only buffers */
	SKMEM_REGION_TXBUF,		/* tx only buffers */
	SKMEM_REGION_UMD,		/* userland metadata */
	SKMEM_REGION_TXAUSD,		/* tx/alloc/event user slot descriptors */
	SKMEM_REGION_RXFUSD,		/* rx/free user slot descriptors */
	SKMEM_REGION_UBFT,		/* userland buflet metadata */
	SKMEM_REGION_USTATS,		/* statistics */
	SKMEM_REGION_FLOWADV,		/* flow advisories */
	SKMEM_REGION_NEXUSADV,		/* nexus advisories */
	SKMEM_REGION_SYSCTLS,		/* sysctl */
	SKMEM_REGION_GUARD_TAIL,	/* trailing guard page(s) */

	/*
	 * The following are NOT user task mappable.
	 */
	SKMEM_REGION_KMD,		/* rx/tx kernel metadata */
	SKMEM_REGION_RXKMD,		/* rx only kernel metadata */
	SKMEM_REGION_TXKMD,		/* tx only kernel metadata */
	SKMEM_REGION_KBFT,		/* rx/tx kernel buflet metadata */
	SKMEM_REGION_RXKBFT,		/* rx only kernel buflet metadata */
	SKMEM_REGION_TXKBFT,		/* tx only kernel buflet metadata */
	SKMEM_REGION_TXAKSD,		/* tx/alloc/event kernel slot descriptors */
	SKMEM_REGION_RXFKSD,		/* rx/free kernel slot descriptors */
	SKMEM_REGION_KSTATS,		/* kernel statistics snapshot */
	SKMEM_REGION_INTRINSIC,		/* intrinsic objects */

	SKMEM_REGIONS			/* max */
} skmem_region_id_t;

/* number of entries in skmem_pp_region_ids[] (defined in skmem.c) */
#define SKMEM_PP_REGIONS	11
extern const skmem_region_id_t skmem_pp_region_ids[SKMEM_PP_REGIONS];

/*
 * Region parameters structure.  Based on requested object parameters,
 * skmem_region_params_config() will compute the segment parameters as
 * well as the configured object parameters.
 */
struct skmem_region_params {
	/*
	 * Region parameters.
	 *
	 * Convention: (i) fields are supplied by the caller; (o) fields
	 * are computed by skmem_region_params_config().
	 */
	const char *srp_name;		/* (i) region name */
	skmem_region_id_t srp_id;	/* (i) region identifier */
	uint32_t srp_cflags;		/* (i) region creation flags */
	uint32_t srp_r_seg_size;	/* (i) requested seg size */
	uint32_t srp_c_seg_size;	/* (o) configured seg size */
	uint32_t srp_seg_cnt;		/* (o) number of segments */

	/*
	 * Object parameters.
	 */
	uint32_t srp_r_obj_size;	/* (i) requested obj size */
	uint32_t srp_r_obj_cnt;		/* (i) requested obj count */
	uint32_t srp_c_obj_size;	/* (o) configured obj size */
	uint32_t srp_c_obj_cnt;		/* (o) configured obj count */
	size_t srp_align;		/* (i) object alignment */

	/*
	 * SKMEM_REGION_{UMD,KMD} specific parameters.
	 */
	nexus_meta_type_t srp_md_type;		/* (i) metadata type */
	nexus_meta_subtype_t srp_md_subtype;	/* (i) metadata subtype */
	uint16_t srp_max_frags;		/* (i) max frags per packet */
};

/*
 * Segment constructor/destructor callback types; both are passed to
 * skmem_region_create(), and the trailing void * is the caller-supplied
 * opaque argument (stored as skr_private).
 */
typedef void (*sksegment_ctor_fn_t)(struct sksegment *,
    IOSKMemoryBufferRef, void *);
typedef void (*sksegment_dtor_fn_t)(struct sksegment *,
    IOSKMemoryBufferRef, void *);

/*
 * Region.
 */
struct skmem_region {
	decl_lck_mtx_data(, skr_lock);	/* region lock */

	/*
	 * Statistics.
	 */
	uint64_t skr_meminuse;		/* memory in use */
	uint64_t skr_w_meminuse;	/* wired memory in use */
	uint64_t skr_memtotal;		/* total memory in region */
	uint64_t skr_alloc;		/* number of allocations */
	uint64_t skr_free;		/* number of frees */
	uint32_t skr_seginuse;		/* total unfreed segments */
	uint32_t skr_rescale;		/* # of hash table rescales */

	/*
	 * Region properties.
	 */
	struct skmem_region_params skr_params;	/* region parameters */
#define skr_id skr_params.srp_id		/* region ID */
#define skr_cflags skr_params.srp_cflags	/* creation flags */
	TAILQ_ENTRY(skmem_region) skr_link;	/* skmem_region linkage */
	char skr_name[64];		/* region name */
	uuid_t skr_uuid;		/* region uuid */
	uint32_t skr_mode;		/* skmem_region mode flags */
	uint32_t skr_size;		/* total region size */
	IOSKMemoryBufferSpec skr_bufspec;	/* IOSKMemoryBuffer spec */
	IOSKRegionSpec skr_regspec;	/* IOSKRegion spec */
	IOSKRegionRef skr_reg;		/* backing IOSKRegion */
	struct zone *skr_zreg;		/* backing zone (pseudo mode) */
	void *skr_private;		/* opaque arg to callbacks */
	struct skmem_cache *skr_cache;	/* client slab/cache layer */

	/*
	 * Objects.
	 *
	 * Shorthand accessors into the embedded skr_params.
	 */
#define skr_r_obj_size skr_params.srp_r_obj_size /* requested obj size */
#define skr_r_obj_cnt skr_params.srp_r_obj_cnt	/* requested obj count */
#define skr_c_obj_size skr_params.srp_c_obj_size /* configured obj size */
#define skr_c_obj_cnt skr_params.srp_c_obj_cnt	/* configured obj count */
#define skr_align skr_params.srp_align		/* object alignment */
#define skr_md_type skr_params.srp_md_type	/* metadata type */
#define skr_md_subtype skr_params.srp_md_subtype /* metadata subtype */
#define skr_max_frags skr_params.srp_max_frags	/* max number of buflets */

	/*
	 * Segment.
	 */
	sksegment_ctor_fn_t skr_seg_ctor;	/* segment constructor */
	sksegment_dtor_fn_t skr_seg_dtor;	/* segment destructor */
	uint32_t skr_seg_objs;		/* # of objects per segment */
#define skr_seg_size skr_params.srp_c_seg_size	/* configured segment size */
#define skr_seg_max_cnt skr_params.srp_seg_cnt	/* max # of segments */
	uint32_t skr_seg_bmap_len;	/* # of skr_seg_bmap */
	bitmap_t *skr_seg_bmap;		/* segment bitmaps */
	uint32_t skr_seg_free_cnt;	/* # of free segments */
	uint32_t skr_hash_initial;	/* initial hash table size */
	uint32_t skr_hash_limit;	/* hash table size limit */
	uint32_t skr_hash_shift;	/* get to interesting bits */
	uint32_t skr_hash_mask;		/* hash table mask */
	struct sksegment_bkt *skr_hash_table;	/* alloc'd segment htable */
	TAILQ_HEAD(segfreehead, sksegment) skr_seg_free; /* free segment list */
	RB_HEAD(segtfreehead, sksegment) skr_seg_tfree;	/* free tree */
	uint32_t skr_seg_waiters;	/* # of waiter threads */

	/*
	 * Region.
	 */
	uint32_t skr_refcnt;		/* reference count */

	/*
	 * Mirror.
	 */
	struct skmem_region *skr_mirror;	/* mirroring (master) region */
};

/*
 * Region lock helpers; skr_lock is a mutex declared in skmem_region.
 */
#define SKR_LOCK(_skr)			\
	lck_mtx_lock(&(_skr)->skr_lock)
#define SKR_LOCK_ASSERT_HELD(_skr)	\
	LCK_MTX_ASSERT(&(_skr)->skr_lock, LCK_MTX_ASSERT_OWNED)
#define SKR_LOCK_ASSERT_NOTHELD(_skr)	\
	LCK_MTX_ASSERT(&(_skr)->skr_lock, LCK_MTX_ASSERT_NOTOWNED)
#define SKR_UNLOCK(_skr)		\
	lck_mtx_unlock(&(_skr)->skr_lock)

/* valid values for skr_mode */
#define SKR_MODE_NOREDIRECT	0x1	/* unaffected by defunct */
#define SKR_MODE_MMAPOK		0x2	/* can be mapped to user task */
#define SKR_MODE_KREADONLY	0x4	/* kernel read only */
#define SKR_MODE_UREADONLY	0x8	/* if user map, map it read-only */
#define SKR_MODE_PERSISTENT	0x10	/* memory stays non-volatile */
#define SKR_MODE_MONOLITHIC	0x20	/* monolithic region */
#define SKR_MODE_NOMAGAZINES	0x40	/* disable magazines layer */
#define SKR_MODE_NOCACHE	0x80	/* caching-inhibited */
#define SKR_MODE_SEGPHYSCONTIG	0x100	/* phys. contiguous segment */
#define SKR_MODE_SHAREOK	0x200	/* allow object sharing */
#define SKR_MODE_IODIR_IN	0x400	/* I/O direction In */
#define SKR_MODE_IODIR_OUT	0x800	/* I/O direction Out */
#define SKR_MODE_GUARD		0x1000	/* guard pages region */
#define SKR_MODE_PUREDATA	0x2000	/* purely data; no pointers */
#define SKR_MODE_PSEUDO		0x4000	/* external backing store */
#define SKR_MODE_SLAB		(1U << 30) /* backend for slab layer */
#define SKR_MODE_MIRRORED	(1U << 31) /* controlled by another region */

/* skr_mode bit names, %b-style format string */
#define SKR_MODE_BITS						\
	"\020\01NOREDIRECT\02MMAPOK\03KREADONLY\04UREADONLY"	\
	"\05PERSISTENT\06MONOLITHIC\07NOMAGAZINES\10NOCACHE"	\
	"\11SEGPHYSCONTIG\012SHAREOK\013IODIR_IN\014IODIR_OUT"	\
	"\015GUARD\016PUREDATA\017PSEUDO\037SLAB\040MIRRORED"

/* valid values for skmem_region_create() */
#define SKMEM_REGION_CR_NOREDIRECT	0x1	/* unaffected by defunct */
#define SKMEM_REGION_CR_MMAPOK		0x2	/* can be mapped to user task
 */
#define SKMEM_REGION_CR_KREADONLY	0x4	/* kernel space readonly */
#define SKMEM_REGION_CR_UREADONLY	0x8	/* if user map, map it RO */
#define SKMEM_REGION_CR_PERSISTENT	0x10	/* memory stays non-volatile */
#define SKMEM_REGION_CR_MONOLITHIC	0x20	/* monolithic region */
#define SKMEM_REGION_CR_NOMAGAZINES	0x40	/* disable magazines layer */
#define SKMEM_REGION_CR_NOCACHE		0x80	/* caching-inhibited */
#define SKMEM_REGION_CR_SEGPHYSCONTIG	0x100	/* phys. contiguous segment */
#define SKMEM_REGION_CR_SHAREOK		0x200	/* allow object sharing */
#define SKMEM_REGION_CR_IODIR_IN	0x400	/* I/O direction in */
#define SKMEM_REGION_CR_IODIR_OUT	0x800	/* I/O direction out */
#define SKMEM_REGION_CR_GUARD		0x1000	/* guard pages region */
#define SKMEM_REGION_CR_PUREDATA	0x2000	/* purely data; no pointers */
#define SKMEM_REGION_CR_PSEUDO		0x4000	/* external backing store */

/* creation flag bit names, %b-style format string */
#define SKMEM_REGION_CR_BITS					\
	"\020\01NOREDIRECT\02MMAPOK\03KREADONLY\04UREADONLY"	\
	"\05PERSISTENT\06MONOLITHIC\07NOMAGAZINES\10NOCACHE"	\
	"\11SEGPHYSCONTIG\012SHAREOK\013IODIR_IN\014IODIR_OUT"	\
	"\015GUARD\016PUREDATA\017PSEUDO"

__BEGIN_DECLS
extern void skmem_region_init(void);
extern void skmem_region_fini(void);
extern void skmem_region_reap_caches(boolean_t);
extern void skmem_region_params_config(struct skmem_region_params *);
extern struct skmem_region *skmem_region_create(const char *,
    struct skmem_region_params *, sksegment_ctor_fn_t, sksegment_dtor_fn_t,
    void *);
extern void skmem_region_mirror(struct skmem_region *, struct skmem_region *);
extern void skmem_region_slab_config(struct skmem_region *,
    struct skmem_cache *);
extern void *skmem_region_alloc(struct skmem_region *, void **,
    struct sksegment **, struct sksegment **, uint32_t);
extern void skmem_region_free(struct skmem_region *, void *, void *);
extern void skmem_region_retain(struct skmem_region *);
extern boolean_t skmem_region_release(struct skmem_region *);
extern mach_vm_address_t skmem_region_obj_lookup(struct skmem_region *,
    uint32_t);
extern int skmem_region_get_info(struct skmem_region *, uint32_t *,
    struct sksegment **);
extern boolean_t skmem_region_for_pp(skmem_region_id_t);
extern void skmem_region_get_stats(struct skmem_region *,
    struct sk_stats_region *);
#if (DEVELOPMENT || DEBUG)
extern uint64_t skmem_region_get_mtbf(void);
/*
 * Reasonable boundaries for MTBF that would make sense for testing,
 * in milliseconds; why not pick a couple of Mersenne p numbers?
 */
#define SKMEM_REGION_MTBF_MIN	2		/* almost 2 msec */
#define SKMEM_REGION_MTBF_MAX	3021377		/* almost 1 hour */
extern void skmem_region_set_mtbf(uint64_t);
#endif /* (DEVELOPMENT || DEBUG) */
#if SK_LOG
extern const char *skmem_region_id2name(skmem_region_id_t);
#endif /* SK_LOG */
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* _SKYWALK_MEM_SKMEMREGIONVAR_H */