/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _SKYWALK_MEM_SKMEMCACHEVAR_H
#define _SKYWALK_MEM_SKMEMCACHEVAR_H

#ifdef BSD_KERNEL_PRIVATE
#include <skywalk/core/skywalk_var.h>
#include <skywalk/os_channel_private.h>
#include <kern/cpu_number.h>
#include <machine/machine_routines.h>

/*
 * Buffer control.
 */
struct skmem_bufctl {
	SLIST_ENTRY(skmem_bufctl) bc_link;      /* bufctl linkage */
	void                    *__sized_by(bc_lim) bc_addr;       /* buffer obj address */
	void                    *bc_addrm;      /* mirrored buffer obj addr */
	struct skmem_slab       *bc_slab;       /* controlling slab */
	uint32_t                bc_lim;         /* buffer obj limit */
	uint32_t                bc_flags;       /* SKMEM_BUFCTL_* flags */
	uint32_t                bc_idx;         /* buffer index within slab */
	volatile uint32_t       bc_usecnt;      /* outstanding use */
};

#define SKMEM_BUFCTL_SHAREOK    0x1             /* supports sharing */

#define SKMEM_STACK_DEPTH       16              /* maximum audit stack depth */

#define SKMEM_CACHE_ALIGN       8               /* min guaranteed alignment */

#define SKMEM_MEMTAG_STRIP_TAG(addr, size)                             \
	__unsafe_forge_bidi_indexable(void *,                          \
	    vm_memtag_canonicalize_kernel((vm_offset_t)addr), size)

#define SKMEM_COMPARE_CANONICAL_ADDR(addr1, addr2, size)               \
	SKMEM_MEMTAG_STRIP_TAG(addr1, size) == SKMEM_MEMTAG_STRIP_TAG(addr2, size)
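
/*
 * Example (illustrative sketch): under hardware memory tagging, two
 * pointers to the same buffer may carry different tag bits, so a raw
 * pointer comparison can spuriously fail.  Stripping the tags first
 * compares the underlying canonical addresses:
 *
 *	if (SKMEM_COMPARE_CANONICAL_ADDR(bc->bc_addr, buf, bc->bc_lim))
 *	        ...     (same underlying buffer, regardless of tag bits)
 */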

/*
 * Alternative buffer control if SKM_MODE_AUDIT is set.
 */
struct skmem_bufctl_audit {
	SLIST_ENTRY(skmem_bufctl) bc_link;      /* bufctl linkage */
	void                    *__sized_by(bc_lim) bc_addr;       /* buffer address */
	void                    *bc_addrm;      /* mirrored buffer address */
	struct skmem_slab       *bc_slab;       /* controlling slab */
	uint32_t                bc_lim;         /* buffer obj limit */
	uint32_t                bc_flags;       /* SKMEM_BUFCTL_* flags */
	uint32_t                bc_idx;         /* buffer index within slab */
	volatile uint32_t       bc_usecnt;      /* outstanding use */
	struct thread           *bc_thread;     /* thread doing transaction */
	uint32_t                bc_timestamp;   /* transaction time */
	uint32_t                bc_depth;       /* stack depth */
	void                    *bc_stack[SKMEM_STACK_DEPTH]; /* stack */
};

/*
 * Buffer control hash bucket.
 */
struct skmem_bufctl_bkt {
	SLIST_HEAD(, skmem_bufctl) bcb_head;    /* bufctl allocated list */
};

/*
 * Slab.
 */
struct skmem_slab {
	TAILQ_ENTRY(skmem_slab) sl_link;        /* slab freelist linkage */
	struct skmem_cache      *sl_cache;      /* controlling cache */
	void                    *sl_base;       /* base of allocated memory */
	void                    *sl_basem;      /* base of mirrored memory */
	struct sksegment        *sl_seg;        /* backing segment */
	struct sksegment        *sl_segm;       /* backing mirrored segment */
	SLIST_HEAD(, skmem_bufctl) sl_head;     /* bufctl free list */
	uint32_t                sl_refcnt;      /* outstanding allocations */
	uint32_t                sl_chunks;      /* # of buffers in slab */
};

#define SKMEM_SLAB_IS_PARTIAL(sl)       \
	((sl)->sl_refcnt > 0 && (sl)->sl_refcnt < (sl)->sl_chunks)

#define SKMEM_SLAB_MEMBER(sl, buf)      \
	(((size_t)(buf) - (size_t)vm_memtag_canonicalize_kernel((vm_offset_t)(sl)->sl_base)) \
	     < (sl)->sl_cache->skm_slabsize)
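
/*
 * Example (illustrative sketch): SKMEM_SLAB_MEMBER() relies on unsigned
 * wraparound; if buf lies below sl_base, the subtraction wraps to a huge
 * value that fails the "< skm_slabsize" test, so a single comparison
 * covers both ends of the range:
 *
 *	if (SKMEM_SLAB_MEMBER(sl, buf))
 *	        ...     (buf is within [sl_base, sl_base + skm_slabsize))
 */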

/*
 * Magazine type.
 */
struct skmem_magtype {
	int                     mt_magsize;     /* magazine size (# of objs) */
	int                     mt_align;       /* magazine alignment */
	size_t                  mt_minbuf;      /* all smaller bufs qualify */
	size_t                  mt_maxbuf;      /* no larger bufs qualify */
	struct skmem_cache      *mt_cache;      /* magazine cache */
	char                    mt_cname[64];   /* magazine cache name */
};

/*
 * Magazine.
 */
struct skmem_mag {
	SLIST_ENTRY(skmem_mag)  mg_link;        /* magazine linkage */
	struct skmem_magtype    *mg_magtype;    /* magazine type */
	size_t                  mg_count;       /* # of mg_round array elements */
	void                    *mg_round[__counted_by(mg_count)];   /* one or more objs */
};

#define SKMEM_MAG_SIZE(n)       \
	offsetof(struct skmem_mag, mg_round[n])
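
/*
 * Example (worked sizing, illustrative): since mg_round[] is a flexible
 * array, a magazine holding n rounds occupies the fixed header plus n
 * pointer slots, which is exactly what offsetof() computes:
 *
 *	bytes = SKMEM_MAG_SIZE(16);
 *	        (== offsetof(struct skmem_mag, mg_round[0]) +
 *	            16 * sizeof (void *))
 */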

/*
 * Magazine depot.
 */
struct skmem_maglist {
	SLIST_HEAD(, skmem_mag) ml_list;        /* magazine list */
	uint32_t                ml_total;       /* number of magazines */
	uint32_t                ml_min;         /* min since last update */
	uint32_t                ml_reaplimit;   /* max reapable magazines */
	uint64_t                ml_alloc;       /* allocations from this list */
};

/*
 * Per-CPU cache structure.
 */
struct skmem_cpu_cache {
	decl_lck_mtx_data(, cp_lock);
	struct skmem_mag        *cp_loaded;     /* currently filled magazine */
	struct skmem_mag        *cp_ploaded;    /* previously filled magazine */
	uint64_t                cp_alloc;       /* allocations from this cpu */
	uint64_t                cp_free;        /* frees to this cpu */
	int                     cp_rounds;      /* # of objs in filled mag */
	int                     cp_prounds;     /* # of objs in previous mag */
	int                     cp_magsize;     /* # of objs in a full mag */
} __attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));
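
/*
 * In outline, the allocation fast path follows the classic magazine
 * scheme (a sketch, not the literal implementation): take a round from
 * cp_loaded while cp_rounds > 0; when it runs empty, exchange it with
 * cp_ploaded if that one still holds rounds; otherwise fall back to the
 * depot and, failing that, the slab layer:
 *
 *	if (ccp->cp_rounds > 0)
 *	        obj = ccp->cp_loaded->mg_round[--ccp->cp_rounds];
 *	else if (ccp->cp_prounds > 0)
 *	        ...     (swap loaded/previous magazines and retry)
 *	else
 *	        ...     (go to the depot, then the slab layer)
 */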

/*
 * Object's region information.
 *
 * This info is provided to skmem_ctor_fn_t() to assist in the
 * construction of master and slave objects.  It is also provided
 * separately via skmem_cache_get_obj_info() when called on an object
 * that has been allocated from a skmem_cache.  Information about the
 * slave object is available only at constructor time.
 */
struct skmem_obj_info {
	void                    *__sized_by(oi_size) oi_addr;       /* object address */
	struct skmem_bufctl     *oi_bc;         /* buffer control (master) */
	uint32_t                oi_size;        /* actual object size */
	obj_idx_t               oi_idx_reg;     /* object idx within region */
	obj_idx_t               oi_idx_seg;     /* object idx within segment */
} __attribute__((__packed__));

/*
 * Generic one-way linked list element structure.  This is used to
 * handle skmem_cache_batch_alloc() requests in order to chain the
 * allocated objects together before returning them to the caller.
 * It is also used by callers of skmem_cache_batch_free() when freeing
 * a batch of packets.  Note that this requires the region's object to
 * be at least the size of struct skmem_obj, as we store this
 * information at the beginning of each object in the chain.
 */
struct skmem_obj {
	/*
	 * Given that we overlay this structure on top of whatever
	 * structure the object represents, the constructor must
	 * ensure that it reserves at least the size of a pointer
	 * at the top for the linkage.
	 */
	struct skmem_obj        *mo_next;       /* next object in the list */
	/*
	 * The following are used only for raw (unconstructed) objects
	 * coming out of the slab layer during allocations.  They are
	 * not touched otherwise by skmem_cache when the object resides
	 * in the magazine.  By utilizing this space, we avoid having
	 * to allocate temporary storage elsewhere.
	 */
	struct skmem_obj_info   mo_info;        /* object's info */
	struct skmem_obj_info   mo_minfo;       /* mirrored object's info */
};
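
/*
 * Example (illustrative): a batch allocation returns its objects chained
 * through mo_next, so a caller walks and detaches them like any
 * singly-linked list:
 *
 *	struct skmem_obj *list = NULL, *obj;
 *	(void) skmem_cache_batch_alloc(skm, &list, objsize, 8, SKMEM_NOSLEEP);
 *	while ((obj = list) != NULL) {
 *	        list = obj->mo_next;
 *	        obj->mo_next = NULL;
 *	        ...     (use obj)
 *	}
 */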

#define SKMEM_OBJ_ADDR(_oi)     (_oi)->oi_addr
#define SKMEM_OBJ_BUFCTL(_oi)   (_oi)->oi_bc
#define SKMEM_OBJ_SIZE(_oi)     (_oi)->oi_size
#define SKMEM_OBJ_IDX_REG(_oi)  (_oi)->oi_idx_reg
#define SKMEM_OBJ_IDX_SEG(_oi)  (_oi)->oi_idx_seg
/* segment the object belongs to (only for master) */
#define SKMEM_OBJ_SEG(_oi)      (_oi)->oi_bc->bc_slab->sl_seg
/* offset of object relative to the object's own region */
#define SKMEM_OBJ_ROFF(_oi)     \
	((mach_vm_offset_t)(SKMEM_OBJ_SIZE(_oi) * SKMEM_OBJ_IDX_REG(_oi)))
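
/*
 * Example (worked, illustrative): objects in a region are fixed-size, so
 * an object's offset is simply size times index; with oi_size == 2048
 * and oi_idx_reg == 5, SKMEM_OBJ_ROFF() yields 2048 * 5 == 10240 bytes
 * from the start of the region.
 */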

typedef int (*skmem_ctor_fn_t)(struct skmem_obj_info *,
    struct skmem_obj_info *, void *, uint32_t);
typedef void (*skmem_dtor_fn_t)(void *, void *);
typedef void (*skmem_reclaim_fn_t)(void *);
typedef int (*skmem_slab_alloc_fn_t)(struct skmem_cache *,
    struct skmem_obj_info *, struct skmem_obj_info *, uint32_t);
typedef void (*skmem_slab_free_fn_t)(struct skmem_cache *, void *);

/*
 * Cache.
 */
struct skmem_cache {
#if KASAN
	void            *skm_start;
	uint32_t        skm_align[0];
#endif
	/*
	 * Commonly-accessed elements during alloc and free.
	 */
	uint32_t        skm_mode;               /* cache mode flags */
	skmem_ctor_fn_t skm_ctor;               /* object constructor */
	skmem_dtor_fn_t skm_dtor;               /* object destructor */
	skmem_reclaim_fn_t skm_reclaim;         /* cache reclaim */
	void            *skm_private;           /* opaque arg to callbacks */

	/*
	 * Depot.
	 */
	decl_lck_mtx_data(, skm_dp_lock);       /* protects depot layer */
	struct skmem_magtype *skm_magtype;      /* magazine type */
	struct skmem_maglist skm_full;          /* full magazines */
	struct skmem_maglist skm_empty;         /* empty magazines */

	/*
	 * Slab.
	 */
	decl_lck_mtx_data(, skm_sl_lock);       /* protects slab layer */
	skmem_slab_alloc_fn_t skm_slab_alloc;   /* slab allocate */
	skmem_slab_free_fn_t skm_slab_free;     /* slab free */
	size_t          skm_chunksize;          /* bufsize + alignment */
	size_t          skm_objsize;            /* actual obj size in slab */
	size_t          skm_slabsize;           /* size of a slab */
	size_t          skm_hash_initial;       /* initial hash table size */
	size_t          skm_hash_limit;         /* hash table size limit */
	size_t          skm_hash_shift;         /* get to interesting bits */
	size_t          skm_hash_mask;          /* hash table mask */
	size_t          skm_hash_size;          /* hash table size */
	struct skmem_bufctl_bkt *__counted_by(skm_hash_size) skm_hash_table; /* alloc'd buffer htable */
	TAILQ_HEAD(, skmem_slab) skm_sl_partial_list; /* partially-allocated */
	TAILQ_HEAD(, skmem_slab) skm_sl_empty_list;   /* fully-allocated */
	struct skmem_region *skm_region;        /* region source for slabs */

	/*
	 * Statistics.
	 */
	uint32_t        skm_cpu_mag_size;       /* current magazine size */
	uint32_t        skm_cpu_mag_resize;     /* # of magazine resizes */
	uint32_t        skm_cpu_mag_purge;      /* # of magazine purges */
	uint32_t        skm_cpu_mag_reap;       /* # of magazine reaps */
	uint64_t        skm_depot_contention;   /* mutex contention count */
	uint64_t        skm_depot_contention_prev; /* previous snapshot */
	uint32_t        skm_depot_full;         /* # of full magazines */
	uint32_t        skm_depot_empty;        /* # of empty magazines */
	uint32_t        skm_depot_ws_zero;      /* # of working set flushes */
	uint32_t        skm_sl_rescale;         /* # of hash table rescales */
	uint32_t        skm_sl_create;          /* slab creates */
	uint32_t        skm_sl_destroy;         /* slab destroys */
	uint32_t        skm_sl_alloc;           /* slab layer allocations */
	uint32_t        skm_sl_free;            /* slab layer frees */
	uint32_t        skm_sl_partial;         /* # of partial slabs */
	uint32_t        skm_sl_empty;           /* # of empty slabs */
	uint64_t        skm_sl_alloc_fail;      /* total failed allocations */
	uint64_t        skm_sl_bufinuse;        /* total unfreed buffers */
	uint64_t        skm_sl_bufmax;          /* max buffers ever */

	/*
	 * Cache properties.
	 */
	TAILQ_ENTRY(skmem_cache) skm_link;      /* cache linkage */
	char            skm_name[64];           /* cache name */
	uuid_t          skm_uuid;               /* cache uuid */
	size_t          skm_bufsize;            /* buffer size */
	size_t          skm_bufalign;           /* buffer alignment */
	size_t          skm_objalign;           /* object alignment */

	/*
	 * CPU layer, aligned at (maximum) cache line boundary.
	 */
	decl_lck_mtx_data(, skm_rs_lock);       /* protects resizing */
	struct thread    *skm_rs_owner;         /* resize owner */
	uint32_t        skm_rs_busy;            /* prevent resizing */
	uint32_t        skm_rs_want;            /* # of threads blocked */
	size_t          skm_cpu_cache_count;    /* # of per-CPU caches */
	struct skmem_cpu_cache  skm_cpu_cache[__counted_by(skm_cpu_cache_count)]
	__attribute__((aligned(CHANNEL_CACHE_ALIGN_MAX)));
};

#define SKMEM_CACHE_SIZE(n)     \
	offsetof(struct skmem_cache, skm_cpu_cache[n])

#define SKMEM_CPU_CACHE(c)                                      \
	((struct skmem_cpu_cache *)((void *)((char *)(c) +      \
	SKMEM_CACHE_SIZE(cpu_number()))))
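
/*
 * Example (illustrative): because skm_cpu_cache[] is the trailing member
 * of struct skmem_cache, SKMEM_CACHE_SIZE(cpu_number()) is the byte
 * offset of the current CPU's slot, making SKMEM_CPU_CACHE(skm)
 * equivalent to:
 *
 *	&skm->skm_cpu_cache[cpu_number()]
 */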

/* valid values for skm_mode, set only by skmem_cache_create() */
#define SKM_MODE_NOMAGAZINES    0x00000001      /* disable magazines layer */
#define SKM_MODE_AUDIT          0x00000002      /* audit transactions */
#define SKM_MODE_NOREDIRECT     0x00000004      /* unaffected by defunct */
#define SKM_MODE_BATCH          0x00000008      /* supports batch alloc/free */
#define SKM_MODE_DYNAMIC        0x00000010      /* enable magazine resizing */
#define SKM_MODE_CLEARONFREE    0x00000020      /* zero-out upon slab free */
#define SKM_MODE_PSEUDO         0x00000040      /* external backing store */
#define SKM_MODE_RECLAIM        0x00000080      /* aggressive memory reclaim */

#define SKM_MODE_BITS \
	"\020\01NOMAGAZINES\02AUDIT\03NOREDIRECT\04BATCH\05DYNAMIC"     \
	"\06CLEARONFREE\07PSEUDO\10RECLAIM"

/*
 * Valid flags for sk{mem,region}_alloc().  SKMEM_FAILOK is valid only if
 * SKMEM_SLEEP is set, i.e. SKMEM_{NOSLEEP,FAILOK} are mutually exclusive.
 * If set, SKMEM_FAILOK indicates that the segment allocation may fail,
 * and that the cache layer would handle the retries rather than blocking
 * inside the region allocator.
 */
#define SKMEM_SLEEP             0x0     /* can block for memory; won't fail */
#define SKMEM_NOSLEEP           0x1     /* cannot block for memory; may fail */
#define SKMEM_PANIC             0x2     /* panic upon allocation failure */
#define SKMEM_FAILOK            0x4     /* can fail for blocking alloc */

/* valid flag values for skmem_cache_create() */
#define SKMEM_CR_NOMAGAZINES    0x1     /* disable magazines layer */
#define SKMEM_CR_BATCH          0x2     /* support batch alloc/free */
#define SKMEM_CR_DYNAMIC        0x4     /* enable magazine resizing */
#define SKMEM_CR_CLEARONFREE    0x8     /* zero-out upon slab free */
#define SKMEM_CR_RECLAIM        0x10    /* aggressive memory reclaim */

__BEGIN_DECLS
/*
 * Given a buffer control, add a use count to it.
 */
__attribute__((always_inline))
static inline void
skmem_bufctl_use(struct skmem_bufctl *bc)
{
	uint32_t old, new;

	os_atomic_rmw_loop(&bc->bc_usecnt, old, new, relaxed, {
		new = old + 1;
		VERIFY(new != 0);
		ASSERT(new == 1 || (bc->bc_flags & SKMEM_BUFCTL_SHAREOK));
	});
}

/*
 * Given a buffer control, remove a use count from it (returns new value).
 */
__attribute__((always_inline))
static inline uint32_t
skmem_bufctl_unuse(struct skmem_bufctl *bc)
{
	uint32_t old, new;

	os_atomic_rmw_loop(&bc->bc_usecnt, old, new, relaxed, {
		new = old - 1;
		VERIFY(old != 0);
		ASSERT(old == 1 || (bc->bc_flags & SKMEM_BUFCTL_SHAREOK));
	});

	return new;
}
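
/*
 * Example (illustrative pairing): a shared buffer takes one use count
 * per reference, and the final skmem_bufctl_unuse(), which returns 0,
 * tells the caller the buffer may be returned to its slab:
 *
 *	skmem_bufctl_use(bc);           (take a reference)
 *	...
 *	if (skmem_bufctl_unuse(bc) == 0)
 *	        ...     (last reference dropped; buffer can be freed)
 */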

extern struct skmem_cache *skmem_slab_cache;    /* cache for skmem_slab */
extern struct skmem_cache *skmem_bufctl_cache;  /* cache for skmem_bufctl */
extern unsigned int bc_size;                    /* size of bufctl */
extern int skmem_slab_alloc_locked(struct skmem_cache *,
    struct skmem_obj_info *, struct skmem_obj_info *, uint32_t);
extern void skmem_slab_free_locked(struct skmem_cache *, void *);
extern int skmem_slab_alloc_pseudo_locked(struct skmem_cache *,
    struct skmem_obj_info *, struct skmem_obj_info *, uint32_t);
extern void skmem_slab_free_pseudo_locked(struct skmem_cache *, void *);
extern void skmem_slab_free(struct skmem_cache *, void *);
extern void skmem_slab_batch_free(struct skmem_cache *, struct skmem_obj *);
extern uint32_t skmem_slab_batch_alloc(struct skmem_cache *, struct skmem_obj **,
    uint32_t, uint32_t);
extern int skmem_slab_alloc(struct skmem_cache *, struct skmem_obj_info *,
    struct skmem_obj_info *, uint32_t);
extern void skmem_audit_bufctl(struct skmem_bufctl *);

#define SKM_SLAB_LOCK(_skm)                     \
	lck_mtx_lock(&(_skm)->skm_sl_lock)
#define SKM_SLAB_LOCK_ASSERT_HELD(_skm)         \
	LCK_MTX_ASSERT(&(_skm)->skm_sl_lock, LCK_MTX_ASSERT_OWNED)
#define SKM_SLAB_LOCK_ASSERT_NOTHELD(_skm)      \
	LCK_MTX_ASSERT(&(_skm)->skm_sl_lock, LCK_MTX_ASSERT_NOTOWNED)
#define SKM_SLAB_UNLOCK(_skm)                   \
	lck_mtx_unlock(&(_skm)->skm_sl_lock)

#define SKMEM_CACHE_HASH_INDEX(_a, _s, _m)      (((_a) >> (_s)) & (_m))
#define SKMEM_CACHE_HASH(_skm, _buf)                                     \
	(&(_skm)->skm_hash_table[SKMEM_CACHE_HASH_INDEX((uintptr_t)_buf, \
	(_skm)->skm_hash_shift, (_skm)->skm_hash_mask)])
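
/*
 * Example (illustrative sketch): the free path can recover the bufctl
 * for a buffer by hashing its address into skm_hash_table and walking
 * the bucket's list:
 *
 *	struct skmem_bufctl_bkt *bkt = SKMEM_CACHE_HASH(skm, buf);
 *	struct skmem_bufctl *bc;
 *
 *	SLIST_FOREACH(bc, &bkt->bcb_head, bc_link) {
 *	        if (SKMEM_COMPARE_CANONICAL_ADDR(bc->bc_addr, buf, bc->bc_lim))
 *	                break;          (found the controlling bufctl)
 *	}
 */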

extern void skmem_cache_pre_init(void);
extern void skmem_cache_init(void);
extern void skmem_cache_fini(void);
extern struct skmem_cache *skmem_cache_create(const char *, size_t, size_t,
    skmem_ctor_fn_t, skmem_dtor_fn_t, skmem_reclaim_fn_t, void *,
    struct skmem_region *, uint32_t);
extern void skmem_cache_destroy(struct skmem_cache *);

extern uint32_t skmem_cache_batch_alloc(struct skmem_cache *,
    struct skmem_obj **list, size_t objsize, uint32_t, uint32_t);

/*
 * XXX -fbounds-safety: Sometimes we use skmem_cache_alloc to allocate a struct
 * with a flexible array (e.g. struct skmem_mag). For those, we can't have the
 * alloc function return void *__single, because we lose bounds information.
 */
static inline void *__header_indexable
skmem_cache_alloc(struct skmem_cache *skm, uint32_t skmflag)
{
	struct skmem_obj *__single buf;

	(void) skmem_cache_batch_alloc(skm, &buf, skm->skm_objsize, 1, skmflag);

	/* This is one of the few places where using __unsafe_forge is okay */
	return __unsafe_forge_bidi_indexable(void *, buf, buf ? skm->skm_objsize : 0);
}
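
/*
 * Example (illustrative usage): single-object allocation and free pair
 * up as with any object cache:
 *
 *	void *obj = skmem_cache_alloc(skm, SKMEM_SLEEP);
 *	...
 *	skmem_cache_free(skm, obj);
 */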

extern void skmem_cache_free(struct skmem_cache *, void *);
extern void skmem_cache_free_nocache(struct skmem_cache *, void *);
extern void skmem_cache_batch_free(struct skmem_cache *, struct skmem_obj *);
extern void skmem_cache_batch_free_nocache(struct skmem_cache *, struct skmem_obj *);
extern void skmem_cache_reap_now(struct skmem_cache *, boolean_t);
extern void skmem_cache_reap(void);
extern void skmem_reap_caches(boolean_t);
extern void skmem_cache_get_obj_info(struct skmem_cache *, void *,
    struct skmem_obj_info *, struct skmem_obj_info *);
extern uint32_t skmem_cache_magazine_max(uint32_t);
extern boolean_t skmem_allow_magazines(void);
#if (DEVELOPMENT || DEBUG)
extern void skmem_cache_test_start(uint32_t);
extern void skmem_cache_test_stop(void);
#endif /* (DEVELOPMENT || DEBUG) */
__END_DECLS
#endif /* BSD_KERNEL_PRIVATE */
#endif /* _SKYWALK_MEM_SKMEMCACHEVAR_H */