/*
 * Copyright (c) 2006-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _SYS_MCACHE_H
#define _SYS_MCACHE_H

#ifdef KERNEL_PRIVATE

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/types.h>
#include <sys/queue.h>
#include <mach/boolean.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#ifdef ASSERT
#undef ASSERT
#endif

#ifdef VERIFY
#undef VERIFY
#endif

/*
 * Unlike VERIFY(), ASSERT() is evaluated only in DEBUG/DEVELOPMENT builds.
 */
#define VERIFY(EX)	\
	((void)(__probable((EX)) || assfail(#EX, __FILE__, __LINE__)))
#if (DEBUG || DEVELOPMENT)
#define ASSERT(EX)	VERIFY(EX)
#else
#define ASSERT(EX)	((void)0)
#endif

/*
 * Compile-time assert; this should be on its own someday.
 */
#define _CASSERT(x)	_Static_assert(x, "compile-time assertion failed")

/*
 * Use CPU_CACHE_LINE_SIZE instead of MAX_CPU_CACHE_LINE_SIZE, unless
 * wasting space is of no concern.
 */
#define MAX_CPU_CACHE_LINE_SIZE	128
#define CPU_CACHE_LINE_SIZE	mcache_cache_line_size()

#ifndef IS_P2ALIGNED
#define IS_P2ALIGNED(v, a)	\
	((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
#endif /* IS_P2ALIGNED */

#ifndef P2ROUNDUP
#define P2ROUNDUP(x, align)	\
	(-(-((uintptr_t)(x)) & -((uintptr_t)align)))
#endif /* P2ROUNDUP */

#ifndef P2ROUNDDOWN
#define P2ROUNDDOWN(x, align)	\
	(((uintptr_t)(x)) & ~((uintptr_t)(align) - 1))
#endif /* P2ROUNDDOWN */

#ifndef P2ALIGN
#define P2ALIGN(x, align)	\
	((uintptr_t)(x) & -((uintptr_t)(align)))
#endif /* P2ALIGN */
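/*
 * Worked examples for the power-of-2 helpers above, expressed with the
 * header's own _CASSERT() macro.  These are illustrative additions, not
 * part of the original interface, and all of them assume that "align"
 * is a power of 2 (the macros are only meaningful in that case).
 */
_CASSERT(IS_P2ALIGNED(16, 8));		/* 16 is 8-byte aligned */
_CASSERT(!IS_P2ALIGNED(13, 8));		/* 13 is not */
_CASSERT(P2ROUNDUP(13, 8) == 16);	/* round up to next multiple of 8 */
_CASSERT(P2ROUNDDOWN(13, 8) == 8);	/* round down to previous multiple */
_CASSERT(P2ALIGN(13, 8) == 8);		/* mask-based round down */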
#define MCACHE_FREE_PATTERN		0xdeadbeefdeadbeefULL
#define MCACHE_UNINITIALIZED_PATTERN	0xbaddcafebaddcafeULL

/*
 * mcache allocation request flags.
 *
 * MCR_NOSLEEP and MCR_FAILOK are mutually exclusive.  The latter is used
 * by the mbuf allocator to handle the implementation of several caches that
 * involve multiple layers of mcache.  It implies a best-effort blocking
 * allocation request; if the request cannot be satisfied, the caller will
 * be blocked until further notice, similar to MCR_SLEEP, except that upon
 * a wakeup it will return immediately to the caller regardless of whether
 * the request has been fulfilled.
 *
 * MCR_TRYHARD implies a non-blocking allocation request, regardless of
 * whether MCR_NOSLEEP is set.  It informs the allocator that the request
 * should not cause the calling thread to block, and that it must have
 * exhausted all possible schemes to fulfill the request, including doing
 * reclaims and/or purges, before returning to the caller.
 *
 * Regular mcache clients should only use MCR_SLEEP or MCR_NOSLEEP.
 */
#define MCR_SLEEP	0x0000		/* same as M_WAITOK */
#define MCR_NOSLEEP	0x0001		/* same as M_NOWAIT */
#define MCR_FAILOK	0x0100		/* private, for internal use only */
#define MCR_TRYHARD	0x0200		/* private, for internal use only */
#define MCR_USR1	0x1000		/* private, for internal use only */

#define MCR_NONBLOCKING	(MCR_NOSLEEP | MCR_FAILOK | MCR_TRYHARD)

/*
 * Generic one-way linked list element structure.  This is used to handle
 * mcache_alloc_ext() requests in order to chain the allocated objects
 * together before returning them to the caller.
 */
typedef struct mcache_obj {
	struct mcache_obj	*obj_next;
} mcache_obj_t;

typedef struct mcache_bkt {
	void			*bkt_next;	/* next bucket in list */
	struct mcache_bkttype	*bkt_type;	/* bucket type */
	void			*bkt_obj[1];	/* one or more objects */
} mcache_bkt_t;

typedef struct mcache_bktlist {
	mcache_bkt_t	*bl_list;	/* bucket list */
	u_int32_t	bl_total;	/* number of buckets */
	u_int32_t	bl_min;		/* min since last update */
	u_int32_t	bl_reaplimit;	/* max reapable buckets */
	u_int64_t	bl_alloc;	/* allocations from this list */
} mcache_bktlist_t;

typedef struct mcache_bkttype {
	int		bt_bktsize;	/* bucket size (number of elements) */
	size_t		bt_minbuf;	/* all smaller buffers qualify */
	size_t		bt_maxbuf;	/* no larger buffers qualify */
	struct mcache	*bt_cache;	/* bucket cache */
} mcache_bkttype_t;

typedef struct mcache_cpu {
	decl_lck_mtx_data(, cc_lock);
	mcache_bkt_t	*cc_filled;	/* the currently filled bucket */
	mcache_bkt_t	*cc_pfilled;	/* the previously filled bucket */
	u_int64_t	cc_alloc;	/* allocations from this cpu */
	u_int64_t	cc_free;	/* frees to this cpu */
	int		cc_objs;	/* number of objects in filled bkt */
	int		cc_pobjs;	/* number of objects in previous bkt */
	int		cc_bktsize;	/* number of elements in a full bkt */
} __attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE))) mcache_cpu_t;

typedef unsigned int (*mcache_allocfn_t)(void *, mcache_obj_t ***,
    unsigned int, int);
typedef void (*mcache_freefn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_auditfn_t)(void *, mcache_obj_t *, boolean_t);
typedef void (*mcache_logfn_t)(u_int32_t, mcache_obj_t *, boolean_t);
typedef void (*mcache_notifyfn_t)(void *, u_int32_t);
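/*
 * Usage sketch (an illustrative addition, not part of the original
 * interface): objects returned by mcache_alloc_ext(), declared below,
 * are chained together through obj_next.  A caller could walk such a
 * chain as follows; mcache_obj_chain_len() is a hypothetical helper.
 */
#if 0	/* example only */
static unsigned int
mcache_obj_chain_len(mcache_obj_t *list)
{
	unsigned int cnt = 0;
	mcache_obj_t *o;

	for (o = list; o != NULL; o = o->obj_next) {
		cnt++;
	}
	return cnt;
}
#endif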
typedef struct mcache {
	/*
	 * Cache properties
	 */
	LIST_ENTRY(mcache) mc_list;	/* cache linkage */
	char		mc_name[32];	/* cache name */
	struct zone	*mc_slab_zone;	/* backend zone allocator */
	mcache_allocfn_t mc_slab_alloc;	/* slab layer allocate callback */
	mcache_freefn_t	mc_slab_free;	/* slab layer free callback */
	mcache_auditfn_t mc_slab_audit;	/* slab layer audit callback */
	mcache_logfn_t	mc_slab_log;	/* slab layer log callback */
	mcache_notifyfn_t mc_slab_notify; /* slab layer notify callback */
	void		*mc_private;	/* opaque arg to callbacks */
	size_t		mc_bufsize;	/* object size */
	size_t		mc_align;	/* object alignment */
	u_int32_t	mc_flags;	/* cache creation flags */
	u_int32_t	mc_purge_cnt;	/* # of purges requested by slab */
	u_int32_t	mc_enable_cnt;	/* # of reenables due to purges */
	u_int32_t	mc_waiter_cnt;	/* # of slab layer waiters */
	u_int32_t	mc_wretry_cnt;	/* # of wait retries */
	u_int32_t	mc_nwretry_cnt;	/* # of no-wait retry attempts */
	u_int32_t	mc_nwfail_cnt;	/* # of no-wait retries that failed */
	decl_lck_mtx_data(, mc_sync_lock); /* protects purges and reenables */
	lck_grp_t	*mc_sync_lock_grp;
	/*
	 * Keep the CPU and bucket layers' lock statistics separate.
	 */
	lck_grp_t	*mc_cpu_lock_grp;

	/*
	 * Bucket layer common to all CPUs
	 */
	decl_lck_mtx_data(, mc_bkt_lock);
	lck_grp_t	*mc_bkt_lock_grp;
	mcache_bkttype_t *cache_bkttype; /* bucket type */
	mcache_bktlist_t mc_full;	/* full buckets */
	mcache_bktlist_t mc_empty;	/* empty buckets */
	size_t		mc_chunksize;	/* bufsize + alignment */
	u_int32_t	mc_bkt_contention; /* lock contention count */
	u_int32_t	mc_bkt_contention_prev; /* previous snapshot */

	/*
	 * Per-CPU layer, aligned at cache line boundary
	 */
	mcache_cpu_t	mc_cpu[1]
	__attribute__((aligned(MAX_CPU_CACHE_LINE_SIZE)));
} mcache_t;

#define MCACHE_ALIGN	8	/* default guaranteed alignment */

/* Valid values for mc_flags */
#define MCF_VERIFY	0x00000001	/* enable verification */
#define MCF_TRACE	0x00000002	/* enable transaction auditing */
#define MCF_NOCPUCACHE	0x00000010	/* disable CPU layer caching */
#define MCF_NOLEAKLOG	0x00000100	/* disable leak logging */
#define MCF_EXPLEAKLOG	0x00000200	/* expose leak info to user space */

#define MCF_DEBUG	(MCF_VERIFY | MCF_TRACE)
#define MCF_FLAGS_MASK	\
	(MCF_DEBUG | MCF_NOCPUCACHE | MCF_NOLEAKLOG | MCF_EXPLEAKLOG)

/* Valid values for notify callback */
#define MCN_RETRYALLOC	0x00000001	/* Allocation should be retried */

#define MCACHE_STACK_DEPTH	16

#define MCA_TRN_MAX	2	/* Number of transactions to record */

#define DUMP_MCA_BUF_SIZE	512

typedef struct mcache_audit {
	struct mcache_audit *mca_next;	/* next audit struct */
	void		*mca_addr;	/* address of buffer */
	mcache_t	*mca_cache;	/* parent cache of the buffer */
	size_t		mca_contents_size; /* size of saved contents */
	void		*mca_contents;	/* user-specific saved contents */
	void		*mca_uptr;	/* user-specific pointer */
	uint32_t	mca_uflags;	/* user-specific flags */
	uint32_t	mca_next_trn;
	struct mca_trn {
		struct thread	*mca_thread;	/* thread doing transaction */
		uint32_t	mca_tstamp;
		uint16_t	mca_depth;
		void		*mca_stack[MCACHE_STACK_DEPTH];
	} mca_trns[MCA_TRN_MAX];
} mcache_audit_t;
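/*
 * Usage sketch for the interfaces declared below (an illustrative
 * addition, not part of the original header).  A simple client creates
 * a cache with the default guaranteed alignment, allocates from it, and
 * frees back to it.  "my_cache" and the 64-byte object size are
 * hypothetical, and the trailing MCR_SLEEP wait flag is an assumption
 * based on the allocation flag definitions above.
 */
#if 0	/* example only */
static mcache_t *my_cache;

static void
my_cache_example(void)
{
	void *obj;

	/* 64-byte objects, MCACHE_ALIGN-aligned, no debug flags */
	my_cache = mcache_create("my_cache", 64, MCACHE_ALIGN, 0, MCR_SLEEP);

	obj = mcache_alloc(my_cache, MCR_NOSLEEP);	/* may return NULL */
	if (obj != NULL) {
		mcache_free(my_cache, obj);
	}
}
#endif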
__private_extern__ int assfail(const char *, const char *, int) __abortlike;
__private_extern__ void mcache_init(void);
__private_extern__ unsigned int mcache_getflags(void);
__private_extern__ unsigned int mcache_cache_line_size(void);
__private_extern__ mcache_t *mcache_create(const char *, size_t,
    size_t, u_int32_t, int);
__private_extern__ void *mcache_alloc(mcache_t *, int);
__private_extern__ void mcache_free(mcache_t *, void *);
__private_extern__ mcache_t *mcache_create_ext(const char *, size_t,
    mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
    mcache_notifyfn_t, void *__unsafe_indexable, u_int32_t, int);
__private_extern__ void mcache_destroy(mcache_t *);
__private_extern__ unsigned int mcache_alloc_ext(mcache_t *, mcache_obj_t **,
    unsigned int, int);
__private_extern__ void mcache_free_ext(mcache_t *, mcache_obj_t *);
__private_extern__ void mcache_reap(void);
__private_extern__ void mcache_reap_now(mcache_t *, boolean_t);
__private_extern__ boolean_t mcache_purge_cache(mcache_t *, boolean_t);
__private_extern__ void mcache_waiter_inc(mcache_t *);
__private_extern__ void mcache_waiter_dec(mcache_t *);
__private_extern__ boolean_t mcache_bkt_isempty(mcache_t *);

struct timeval;
__private_extern__ void mcache_buffer_log(mcache_audit_t *, void *, mcache_t *,
    struct timeval *);
__private_extern__ void mcache_set_pattern(u_int64_t, void *, size_t);
__private_extern__ void *mcache_verify_pattern(u_int64_t, void *, size_t);
__private_extern__ void mcache_audit_free_verify(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ void mcache_audit_free_verify_set(mcache_audit_t *,
    void *, size_t, size_t);
__private_extern__ char *mcache_dump_mca(char buf[DUMP_MCA_BUF_SIZE],
    mcache_audit_t *);

extern mcache_t *mcache_audit_cache;

#ifdef __cplusplus
}
#endif

#endif /* KERNEL_PRIVATE */

#endif /* _SYS_MCACHE_H */