xref: /xnu-11417.140.69/bsd/kern/mcache.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
/*
 * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28*43a90889SApple OSS Distributions 
/*
 * Memory allocator with per-CPU caching, derived from the kmem magazine
 * concept and implementation as described in the following paper:
 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick.pdf
 * That implementation is Copyright 2006 Sun Microsystems, Inc.  All rights
 * reserved.  Use is subject to license terms.
 *
 * There are several major differences between this and the original kmem
 * magazine: this derivative implementation allows for multiple objects to
 * be allocated and freed from/to the object cache in one call; in addition,
 * it provides for better flexibility where the user is allowed to define
 * its own slab allocator (instead of the default zone allocator).  Finally,
 * no object construction/destruction takes place at the moment, although
 * this could be added in future to improve efficiency.
 */
44*43a90889SApple OSS Distributions 
#include <sys/param.h>
#include <sys/types.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/queue.h>
#include <sys/kernel.h>
#include <sys/systm.h>

#include <kern/debug.h>
#include <kern/zalloc.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>
#include <kern/thread_call.h>

#include <libkern/libkern.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>

#include <mach/vm_param.h>
#include <machine/limits.h>
#include <machine/machine_routines.h>

#include <os/atomic_private.h>

#include <string.h>

#include <sys/mcache.h>
72*43a90889SApple OSS Distributions 
/* Size of an mcache_t holding n per-CPU cache structures (mc_cpu[] is the
 * trailing flexible member of the structure). */
#define MCACHE_SIZE(n) \
	__builtin_offsetof(mcache_t, mc_cpu[n])

/* Allocate extra in case we need to manually align the pointer */
#define MCACHE_ALLOC_SIZE \
	(sizeof (void *) + MCACHE_SIZE(ncpu) + CPU_CACHE_LINE_SIZE)

/* Per-CPU cache structure of cache (c) for the CPU we are running on */
#define MCACHE_CPU(c) \
	(mcache_cpu_t *)((void *)((char *)(c) + MCACHE_SIZE(cpu_number())))

/*
 * MCACHE_LIST_LOCK() and MCACHE_LIST_UNLOCK() are macros used
 * to serialize accesses to the global list of caches in the system.
 * They also record the thread currently running in the critical
 * section, so that we can avoid recursive requests to reap the
 * caches when memory runs low.
 *
 * Wrapped in do { } while (0) so the multi-statement expansion is safe
 * inside an unbraced if/else.
 */
#define MCACHE_LIST_LOCK() do {                         \
	lck_mtx_lock(&mcache_llock);                    \
	mcache_llock_owner = current_thread();          \
} while (0)

#define MCACHE_LIST_UNLOCK() do {                       \
	mcache_llock_owner = NULL;                      \
	lck_mtx_unlock(&mcache_llock);                  \
} while (0)

#define MCACHE_LOCK(l)          lck_mtx_lock(l)
#define MCACHE_UNLOCK(l)        lck_mtx_unlock(l)
#define MCACHE_LOCK_TRY(l)      lck_mtx_try_lock(l)
103*43a90889SApple OSS Distributions 
104*43a90889SApple OSS Distributions static unsigned int ncpu;
105*43a90889SApple OSS Distributions static unsigned int cache_line_size;
106*43a90889SApple OSS Distributions static struct thread *mcache_llock_owner;
107*43a90889SApple OSS Distributions static LCK_GRP_DECLARE(mcache_llock_grp, "mcache.list");
108*43a90889SApple OSS Distributions static LCK_MTX_DECLARE(mcache_llock, &mcache_llock_grp);
109*43a90889SApple OSS Distributions static struct zone *mcache_zone;
110*43a90889SApple OSS Distributions static const uint32_t mcache_reap_interval = 15;
111*43a90889SApple OSS Distributions static const uint32_t mcache_reap_interval_leeway = 2;
112*43a90889SApple OSS Distributions static UInt32 mcache_reaping;
113*43a90889SApple OSS Distributions static int mcache_ready;
114*43a90889SApple OSS Distributions static int mcache_updating;
115*43a90889SApple OSS Distributions 
116*43a90889SApple OSS Distributions static int mcache_bkt_contention = 3;
117*43a90889SApple OSS Distributions #if DEBUG
118*43a90889SApple OSS Distributions static unsigned int mcache_flags = MCF_DEBUG;
119*43a90889SApple OSS Distributions #else
120*43a90889SApple OSS Distributions static unsigned int mcache_flags = 0;
121*43a90889SApple OSS Distributions #endif
122*43a90889SApple OSS Distributions 
123*43a90889SApple OSS Distributions int mca_trn_max = MCA_TRN_MAX;
124*43a90889SApple OSS Distributions 
125*43a90889SApple OSS Distributions static mcache_bkttype_t mcache_bkttype[] = {
126*43a90889SApple OSS Distributions 	{ 1, 4096, 32768, NULL },
127*43a90889SApple OSS Distributions 	{ 3, 2048, 16384, NULL },
128*43a90889SApple OSS Distributions 	{ 7, 1024, 12288, NULL },
129*43a90889SApple OSS Distributions 	{ 15, 256, 8192, NULL },
130*43a90889SApple OSS Distributions 	{ 31, 64, 4096, NULL },
131*43a90889SApple OSS Distributions 	{ 47, 0, 2048, NULL },
132*43a90889SApple OSS Distributions 	{ 63, 0, 1024, NULL },
133*43a90889SApple OSS Distributions 	{ 95, 0, 512, NULL },
134*43a90889SApple OSS Distributions 	{ 143, 0, 256, NULL },
135*43a90889SApple OSS Distributions 	{ 165, 0, 0, NULL },
136*43a90889SApple OSS Distributions };
137*43a90889SApple OSS Distributions 
138*43a90889SApple OSS Distributions static mcache_t *mcache_create_common(const char *, size_t, size_t,
139*43a90889SApple OSS Distributions     mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
140*43a90889SApple OSS Distributions     mcache_notifyfn_t, void *, u_int32_t, int);
141*43a90889SApple OSS Distributions static unsigned int mcache_slab_alloc(void *, mcache_obj_t ***,
142*43a90889SApple OSS Distributions     unsigned int, int);
143*43a90889SApple OSS Distributions static void mcache_slab_free(void *, mcache_obj_t *, boolean_t);
144*43a90889SApple OSS Distributions static void mcache_slab_audit(void *, mcache_obj_t *, boolean_t);
145*43a90889SApple OSS Distributions static void mcache_cpu_refill(mcache_cpu_t *, mcache_bkt_t *, int);
146*43a90889SApple OSS Distributions static void mcache_cpu_batch_refill(mcache_cpu_t *, mcache_bkt_t *, int);
147*43a90889SApple OSS Distributions static uint32_t mcache_bkt_batch_alloc(mcache_t *, mcache_bktlist_t *,
148*43a90889SApple OSS Distributions     mcache_bkt_t **, uint32_t);
149*43a90889SApple OSS Distributions static void mcache_bkt_batch_free(mcache_t *, mcache_bktlist_t *, mcache_bkt_t *);
150*43a90889SApple OSS Distributions static void mcache_cache_bkt_enable(mcache_t *);
151*43a90889SApple OSS Distributions static void mcache_bkt_purge(mcache_t *);
152*43a90889SApple OSS Distributions static void mcache_bkt_destroy(mcache_t *, mcache_bkt_t *, int);
153*43a90889SApple OSS Distributions static void mcache_bkt_ws_update(mcache_t *);
154*43a90889SApple OSS Distributions static void mcache_bkt_ws_zero(mcache_t *);
155*43a90889SApple OSS Distributions static void mcache_bkt_ws_reap(mcache_t *);
156*43a90889SApple OSS Distributions static void mcache_dispatch(void (*)(void *), void *);
157*43a90889SApple OSS Distributions static void mcache_cache_reap(mcache_t *);
158*43a90889SApple OSS Distributions static void mcache_cache_update(mcache_t *);
159*43a90889SApple OSS Distributions static void mcache_cache_bkt_resize(void *);
160*43a90889SApple OSS Distributions static void mcache_cache_enable(void *);
161*43a90889SApple OSS Distributions static void mcache_update(thread_call_param_t __unused, thread_call_param_t __unused);
162*43a90889SApple OSS Distributions static void mcache_update_timeout(void *);
163*43a90889SApple OSS Distributions static void mcache_applyall(void (*)(mcache_t *));
164*43a90889SApple OSS Distributions static void mcache_reap_start(void *);
165*43a90889SApple OSS Distributions static void mcache_reap_done(void *);
166*43a90889SApple OSS Distributions static void mcache_reap_timeout(thread_call_param_t __unused, thread_call_param_t);
167*43a90889SApple OSS Distributions static void mcache_notify(mcache_t *, u_int32_t);
168*43a90889SApple OSS Distributions static void mcache_purge(void *);
169*43a90889SApple OSS Distributions __attribute__((noreturn))
170*43a90889SApple OSS Distributions static void mcache_audit_panic(mcache_audit_t *mca, void *addr, size_t offset,
171*43a90889SApple OSS Distributions     int64_t expected, int64_t got);
172*43a90889SApple OSS Distributions 
173*43a90889SApple OSS Distributions static LIST_HEAD(, mcache) mcache_head;
174*43a90889SApple OSS Distributions mcache_t *mcache_audit_cache;
175*43a90889SApple OSS Distributions 
176*43a90889SApple OSS Distributions static thread_call_t mcache_reap_tcall;
177*43a90889SApple OSS Distributions static thread_call_t mcache_update_tcall;
178*43a90889SApple OSS Distributions 
179*43a90889SApple OSS Distributions /*
180*43a90889SApple OSS Distributions  * Initialize the framework; this is currently called as part of BSD init.
181*43a90889SApple OSS Distributions  */
182*43a90889SApple OSS Distributions __private_extern__ void
mcache_init(void)183*43a90889SApple OSS Distributions mcache_init(void)
184*43a90889SApple OSS Distributions {
185*43a90889SApple OSS Distributions 	mcache_bkttype_t *btp;
186*43a90889SApple OSS Distributions 	unsigned int i;
187*43a90889SApple OSS Distributions 	char name[32];
188*43a90889SApple OSS Distributions 
189*43a90889SApple OSS Distributions 	VERIFY(mca_trn_max >= 2);
190*43a90889SApple OSS Distributions 
191*43a90889SApple OSS Distributions 	ncpu = ml_wait_max_cpus();
192*43a90889SApple OSS Distributions 	(void) mcache_cache_line_size();        /* prime it */
193*43a90889SApple OSS Distributions 
194*43a90889SApple OSS Distributions 	mcache_reap_tcall = thread_call_allocate(mcache_reap_timeout, NULL);
195*43a90889SApple OSS Distributions 	mcache_update_tcall = thread_call_allocate(mcache_update, NULL);
196*43a90889SApple OSS Distributions 	if (mcache_reap_tcall == NULL || mcache_update_tcall == NULL) {
197*43a90889SApple OSS Distributions 		panic("mcache_init: thread_call_allocate failed");
198*43a90889SApple OSS Distributions 		/* NOTREACHED */
199*43a90889SApple OSS Distributions 		__builtin_unreachable();
200*43a90889SApple OSS Distributions 	}
201*43a90889SApple OSS Distributions 
202*43a90889SApple OSS Distributions 	mcache_zone = zone_create("mcache", MCACHE_ALLOC_SIZE,
203*43a90889SApple OSS Distributions 	    ZC_PGZ_USE_GUARDS | ZC_DESTRUCTIBLE);
204*43a90889SApple OSS Distributions 
205*43a90889SApple OSS Distributions 	LIST_INIT(&mcache_head);
206*43a90889SApple OSS Distributions 
207*43a90889SApple OSS Distributions 	for (i = 0; i < sizeof(mcache_bkttype) / sizeof(*btp); i++) {
208*43a90889SApple OSS Distributions 		btp = &mcache_bkttype[i];
209*43a90889SApple OSS Distributions 		(void) snprintf(name, sizeof(name), "bkt_%d",
210*43a90889SApple OSS Distributions 		    btp->bt_bktsize);
211*43a90889SApple OSS Distributions 		btp->bt_cache = mcache_create(name,
212*43a90889SApple OSS Distributions 		    (btp->bt_bktsize + 1) * sizeof(void *), 0, 0, MCR_SLEEP);
213*43a90889SApple OSS Distributions 	}
214*43a90889SApple OSS Distributions 
215*43a90889SApple OSS Distributions 	PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof(mcache_flags));
216*43a90889SApple OSS Distributions 	mcache_flags &= MCF_FLAGS_MASK;
217*43a90889SApple OSS Distributions 
218*43a90889SApple OSS Distributions 	mcache_audit_cache = mcache_create("audit", sizeof(mcache_audit_t),
219*43a90889SApple OSS Distributions 	    0, 0, MCR_SLEEP);
220*43a90889SApple OSS Distributions 
221*43a90889SApple OSS Distributions 	mcache_applyall(mcache_cache_bkt_enable);
222*43a90889SApple OSS Distributions 	mcache_ready = 1;
223*43a90889SApple OSS Distributions 
224*43a90889SApple OSS Distributions 	printf("mcache: %d CPU(s), %d bytes CPU cache line size\n",
225*43a90889SApple OSS Distributions 	    ncpu, CPU_CACHE_LINE_SIZE);
226*43a90889SApple OSS Distributions }
227*43a90889SApple OSS Distributions 
228*43a90889SApple OSS Distributions /*
229*43a90889SApple OSS Distributions  * Return the global mcache flags.
230*43a90889SApple OSS Distributions  */
231*43a90889SApple OSS Distributions __private_extern__ unsigned int
mcache_getflags(void)232*43a90889SApple OSS Distributions mcache_getflags(void)
233*43a90889SApple OSS Distributions {
234*43a90889SApple OSS Distributions 	return mcache_flags;
235*43a90889SApple OSS Distributions }
236*43a90889SApple OSS Distributions 
237*43a90889SApple OSS Distributions /*
238*43a90889SApple OSS Distributions  * Return the CPU cache line size.
239*43a90889SApple OSS Distributions  */
240*43a90889SApple OSS Distributions __private_extern__ unsigned int
mcache_cache_line_size(void)241*43a90889SApple OSS Distributions mcache_cache_line_size(void)
242*43a90889SApple OSS Distributions {
243*43a90889SApple OSS Distributions 	if (cache_line_size == 0) {
244*43a90889SApple OSS Distributions 		ml_cpu_info_t cpu_info;
245*43a90889SApple OSS Distributions 		ml_cpu_get_info(&cpu_info);
246*43a90889SApple OSS Distributions 		cache_line_size = (unsigned int)cpu_info.cache_line_size;
247*43a90889SApple OSS Distributions 	}
248*43a90889SApple OSS Distributions 	return cache_line_size;
249*43a90889SApple OSS Distributions }
250*43a90889SApple OSS Distributions 
251*43a90889SApple OSS Distributions /*
252*43a90889SApple OSS Distributions  * Create a cache using the zone allocator as the backend slab allocator.
253*43a90889SApple OSS Distributions  * The caller may specify any alignment for the object; if it specifies 0
254*43a90889SApple OSS Distributions  * the default alignment (MCACHE_ALIGN) will be used.
255*43a90889SApple OSS Distributions  */
256*43a90889SApple OSS Distributions __private_extern__ mcache_t *
mcache_create(const char * name,size_t bufsize,size_t align,u_int32_t flags,int wait __unused)257*43a90889SApple OSS Distributions mcache_create(const char *name, size_t bufsize, size_t align,
258*43a90889SApple OSS Distributions     u_int32_t flags, int wait __unused)
259*43a90889SApple OSS Distributions {
260*43a90889SApple OSS Distributions 	return mcache_create_common(name, bufsize, align, mcache_slab_alloc,
261*43a90889SApple OSS Distributions 	           mcache_slab_free, mcache_slab_audit, NULL, NULL, NULL, flags, 1);
262*43a90889SApple OSS Distributions }
263*43a90889SApple OSS Distributions 
264*43a90889SApple OSS Distributions /*
265*43a90889SApple OSS Distributions  * Create a cache using a custom backend slab allocator.  Since the caller
266*43a90889SApple OSS Distributions  * is responsible for allocation, no alignment guarantee will be provided
267*43a90889SApple OSS Distributions  * by this framework.
268*43a90889SApple OSS Distributions  */
269*43a90889SApple OSS Distributions __private_extern__ mcache_t *
mcache_create_ext(const char * name,size_t bufsize,mcache_allocfn_t allocfn,mcache_freefn_t freefn,mcache_auditfn_t auditfn,mcache_logfn_t logfn,mcache_notifyfn_t notifyfn,void * arg,u_int32_t flags,int wait __unused)270*43a90889SApple OSS Distributions mcache_create_ext(const char *name, size_t bufsize,
271*43a90889SApple OSS Distributions     mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
272*43a90889SApple OSS Distributions     mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
273*43a90889SApple OSS Distributions     u_int32_t flags, int wait __unused)
274*43a90889SApple OSS Distributions {
275*43a90889SApple OSS Distributions 	return mcache_create_common(name, bufsize, 0, allocfn,
276*43a90889SApple OSS Distributions 	           freefn, auditfn, logfn, notifyfn, arg, flags, 0);
277*43a90889SApple OSS Distributions }
278*43a90889SApple OSS Distributions 
279*43a90889SApple OSS Distributions /*
280*43a90889SApple OSS Distributions  * Common cache creation routine.
281*43a90889SApple OSS Distributions  */
282*43a90889SApple OSS Distributions static mcache_t *
mcache_create_common(const char * name,size_t bufsize,size_t align,mcache_allocfn_t allocfn,mcache_freefn_t freefn,mcache_auditfn_t auditfn,mcache_logfn_t logfn,mcache_notifyfn_t notifyfn,void * arg,u_int32_t flags,int need_zone)283*43a90889SApple OSS Distributions mcache_create_common(const char *name, size_t bufsize, size_t align,
284*43a90889SApple OSS Distributions     mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
285*43a90889SApple OSS Distributions     mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
286*43a90889SApple OSS Distributions     u_int32_t flags, int need_zone)
287*43a90889SApple OSS Distributions {
288*43a90889SApple OSS Distributions 	mcache_bkttype_t *btp;
289*43a90889SApple OSS Distributions 	mcache_t *cp = NULL;
290*43a90889SApple OSS Distributions 	size_t chunksize;
291*43a90889SApple OSS Distributions 	void *buf, **pbuf;
292*43a90889SApple OSS Distributions 	unsigned int c;
293*43a90889SApple OSS Distributions 	char lck_name[64];
294*43a90889SApple OSS Distributions 
295*43a90889SApple OSS Distributions 	buf = zalloc_flags(mcache_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
296*43a90889SApple OSS Distributions 
297*43a90889SApple OSS Distributions 	/*
298*43a90889SApple OSS Distributions 	 * In case we didn't get a cache-aligned memory, round it up
299*43a90889SApple OSS Distributions 	 * accordingly.  This is needed in order to get the rest of
300*43a90889SApple OSS Distributions 	 * structure members aligned properly.  It also means that
301*43a90889SApple OSS Distributions 	 * the memory span gets shifted due to the round up, but it
302*43a90889SApple OSS Distributions 	 * is okay since we've allocated extra space for this.
303*43a90889SApple OSS Distributions 	 */
304*43a90889SApple OSS Distributions 	cp = (mcache_t *)
305*43a90889SApple OSS Distributions 	    P2ROUNDUP((intptr_t)buf + sizeof(void *), CPU_CACHE_LINE_SIZE);
306*43a90889SApple OSS Distributions 	pbuf = (void **)((intptr_t)cp - sizeof(void *));
307*43a90889SApple OSS Distributions 	*pbuf = buf;
308*43a90889SApple OSS Distributions 
309*43a90889SApple OSS Distributions 	/*
310*43a90889SApple OSS Distributions 	 * Guaranteed alignment is valid only when we use the internal
311*43a90889SApple OSS Distributions 	 * slab allocator (currently set to use the zone allocator).
312*43a90889SApple OSS Distributions 	 */
313*43a90889SApple OSS Distributions 	if (!need_zone) {
314*43a90889SApple OSS Distributions 		align = 1;
315*43a90889SApple OSS Distributions 	} else {
316*43a90889SApple OSS Distributions 		/* Enforce 64-bit minimum alignment for zone-based buffers */
317*43a90889SApple OSS Distributions 		if (align == 0) {
318*43a90889SApple OSS Distributions 			align = MCACHE_ALIGN;
319*43a90889SApple OSS Distributions 		}
320*43a90889SApple OSS Distributions 		align = P2ROUNDUP(align, MCACHE_ALIGN);
321*43a90889SApple OSS Distributions 	}
322*43a90889SApple OSS Distributions 
323*43a90889SApple OSS Distributions 	if ((align & (align - 1)) != 0) {
324*43a90889SApple OSS Distributions 		panic("mcache_create: bad alignment %lu", align);
325*43a90889SApple OSS Distributions 		/* NOTREACHED */
326*43a90889SApple OSS Distributions 		__builtin_unreachable();
327*43a90889SApple OSS Distributions 	}
328*43a90889SApple OSS Distributions 
329*43a90889SApple OSS Distributions 	cp->mc_align = align;
330*43a90889SApple OSS Distributions 	cp->mc_slab_alloc = allocfn;
331*43a90889SApple OSS Distributions 	cp->mc_slab_free = freefn;
332*43a90889SApple OSS Distributions 	cp->mc_slab_audit = auditfn;
333*43a90889SApple OSS Distributions 	cp->mc_slab_log = logfn;
334*43a90889SApple OSS Distributions 	cp->mc_slab_notify = notifyfn;
335*43a90889SApple OSS Distributions 	cp->mc_private = need_zone ? cp : arg;
336*43a90889SApple OSS Distributions 	cp->mc_bufsize = bufsize;
337*43a90889SApple OSS Distributions 	cp->mc_flags = (flags & MCF_FLAGS_MASK) | mcache_flags;
338*43a90889SApple OSS Distributions 
339*43a90889SApple OSS Distributions 	(void) snprintf(cp->mc_name, sizeof(cp->mc_name), "mcache.%s", name);
340*43a90889SApple OSS Distributions 
341*43a90889SApple OSS Distributions 	(void) snprintf(lck_name, sizeof(lck_name), "%s.cpu", cp->mc_name);
342*43a90889SApple OSS Distributions 	cp->mc_cpu_lock_grp = lck_grp_alloc_init(lck_name, LCK_GRP_ATTR_NULL);
343*43a90889SApple OSS Distributions 
344*43a90889SApple OSS Distributions 	/*
345*43a90889SApple OSS Distributions 	 * Allocation chunk size is the object's size plus any extra size
346*43a90889SApple OSS Distributions 	 * needed to satisfy the object's alignment.  It is enforced to be
347*43a90889SApple OSS Distributions 	 * at least the size of an LP64 pointer to simplify auditing and to
348*43a90889SApple OSS Distributions 	 * handle multiple-element allocation requests, where the elements
349*43a90889SApple OSS Distributions 	 * returned are linked together in a list.
350*43a90889SApple OSS Distributions 	 */
351*43a90889SApple OSS Distributions 	chunksize = MAX(bufsize, sizeof(u_int64_t));
352*43a90889SApple OSS Distributions 	if (need_zone) {
353*43a90889SApple OSS Distributions 		VERIFY(align != 0 && (align % MCACHE_ALIGN) == 0);
354*43a90889SApple OSS Distributions 		chunksize += sizeof(uint64_t) + align;
355*43a90889SApple OSS Distributions 		chunksize = P2ROUNDUP(chunksize, align);
356*43a90889SApple OSS Distributions 		cp->mc_slab_zone = zone_create(cp->mc_name, chunksize,
357*43a90889SApple OSS Distributions 		    ZC_PGZ_USE_GUARDS | ZC_DESTRUCTIBLE);
358*43a90889SApple OSS Distributions 	}
359*43a90889SApple OSS Distributions 	cp->mc_chunksize = chunksize;
360*43a90889SApple OSS Distributions 
361*43a90889SApple OSS Distributions 	/*
362*43a90889SApple OSS Distributions 	 * Initialize the bucket layer.
363*43a90889SApple OSS Distributions 	 */
364*43a90889SApple OSS Distributions 	(void) snprintf(lck_name, sizeof(lck_name), "%s.bkt", cp->mc_name);
365*43a90889SApple OSS Distributions 	cp->mc_bkt_lock_grp = lck_grp_alloc_init(lck_name,
366*43a90889SApple OSS Distributions 	    LCK_GRP_ATTR_NULL);
367*43a90889SApple OSS Distributions 	lck_mtx_init(&cp->mc_bkt_lock, cp->mc_bkt_lock_grp, LCK_ATTR_NULL);
368*43a90889SApple OSS Distributions 
369*43a90889SApple OSS Distributions 	(void) snprintf(lck_name, sizeof(lck_name), "%s.sync", cp->mc_name);
370*43a90889SApple OSS Distributions 	cp->mc_sync_lock_grp = lck_grp_alloc_init(lck_name,
371*43a90889SApple OSS Distributions 	    LCK_GRP_ATTR_NULL);
372*43a90889SApple OSS Distributions 	lck_mtx_init(&cp->mc_sync_lock, cp->mc_sync_lock_grp, LCK_ATTR_NULL);
373*43a90889SApple OSS Distributions 
374*43a90889SApple OSS Distributions 	for (btp = mcache_bkttype; chunksize <= btp->bt_minbuf; btp++) {
375*43a90889SApple OSS Distributions 		continue;
376*43a90889SApple OSS Distributions 	}
377*43a90889SApple OSS Distributions 
378*43a90889SApple OSS Distributions 	cp->cache_bkttype = btp;
379*43a90889SApple OSS Distributions 
380*43a90889SApple OSS Distributions 	/*
381*43a90889SApple OSS Distributions 	 * Initialize the CPU layer.  Each per-CPU structure is aligned
382*43a90889SApple OSS Distributions 	 * on the CPU cache line boundary to prevent false sharing.
383*43a90889SApple OSS Distributions 	 */
384*43a90889SApple OSS Distributions 	for (c = 0; c < ncpu; c++) {
385*43a90889SApple OSS Distributions 		mcache_cpu_t *ccp = &cp->mc_cpu[c];
386*43a90889SApple OSS Distributions 
387*43a90889SApple OSS Distributions 		VERIFY(IS_P2ALIGNED(ccp, CPU_CACHE_LINE_SIZE));
388*43a90889SApple OSS Distributions 		lck_mtx_init(&ccp->cc_lock, cp->mc_cpu_lock_grp, LCK_ATTR_NULL);
389*43a90889SApple OSS Distributions 		ccp->cc_objs = -1;
390*43a90889SApple OSS Distributions 		ccp->cc_pobjs = -1;
391*43a90889SApple OSS Distributions 	}
392*43a90889SApple OSS Distributions 
393*43a90889SApple OSS Distributions 	if (mcache_ready) {
394*43a90889SApple OSS Distributions 		mcache_cache_bkt_enable(cp);
395*43a90889SApple OSS Distributions 	}
396*43a90889SApple OSS Distributions 
397*43a90889SApple OSS Distributions 	/* TODO: dynamically create sysctl for stats */
398*43a90889SApple OSS Distributions 
399*43a90889SApple OSS Distributions 	MCACHE_LIST_LOCK();
400*43a90889SApple OSS Distributions 	LIST_INSERT_HEAD(&mcache_head, cp, mc_list);
401*43a90889SApple OSS Distributions 	MCACHE_LIST_UNLOCK();
402*43a90889SApple OSS Distributions 
403*43a90889SApple OSS Distributions 	/*
404*43a90889SApple OSS Distributions 	 * If cache buckets are enabled and this is the first cache
405*43a90889SApple OSS Distributions 	 * created, start the periodic cache update.
406*43a90889SApple OSS Distributions 	 */
407*43a90889SApple OSS Distributions 	if (!(mcache_flags & MCF_NOCPUCACHE) && !mcache_updating) {
408*43a90889SApple OSS Distributions 		mcache_updating = 1;
409*43a90889SApple OSS Distributions 		mcache_update_timeout(NULL);
410*43a90889SApple OSS Distributions 	}
411*43a90889SApple OSS Distributions 	if (cp->mc_flags & MCF_DEBUG) {
412*43a90889SApple OSS Distributions 		printf("mcache_create: %s (%s) arg %p bufsize %lu align %lu "
413*43a90889SApple OSS Distributions 		    "chunksize %lu bktsize %d\n", name, need_zone ? "i" : "e",
414*43a90889SApple OSS Distributions 		    arg, bufsize, cp->mc_align, chunksize, btp->bt_bktsize);
415*43a90889SApple OSS Distributions 	}
416*43a90889SApple OSS Distributions 	return cp;
417*43a90889SApple OSS Distributions }
418*43a90889SApple OSS Distributions 
419*43a90889SApple OSS Distributions /*
420*43a90889SApple OSS Distributions  * Allocate one or more objects from a cache.
421*43a90889SApple OSS Distributions  */
/*
 * Allocate one or more objects from a cache.  Returns the number of
 * objects actually allocated; on return, *list heads a chain of the
 * allocated objects linked through obj_next (NULL-terminated).  For
 * blocking requests (no MCR_NONBLOCKING bit in wait) this retries
 * until all 'num' objects are obtained; non-blocking requests may
 * return fewer than requested.
 */
__private_extern__ unsigned int
mcache_alloc_ext(mcache_t *cp, mcache_obj_t **list, unsigned int num, int wait)
{
	mcache_cpu_t *ccp;
	mcache_obj_t **top = &(*list);	/* caller's head; for logging/audit below */
	mcache_bkt_t *bkt;
	unsigned int need = num;	/* objects still to be satisfied */
	boolean_t nwretry = FALSE;	/* set once a non-wait retry has occurred */

	/* MCR_NOSLEEP and MCR_FAILOK are mutually exclusive */
	VERIFY((wait & (MCR_NOSLEEP | MCR_FAILOK)) != (MCR_NOSLEEP | MCR_FAILOK));

	ASSERT(list != NULL);
	*list = NULL;

	if (num == 0) {
		return 0;
	}

retry_alloc:
	/* We may not always be running in the same CPU in case of retries */
	ccp = MCACHE_CPU(cp);

	MCACHE_LOCK(&ccp->cc_lock);
	for (;;) {
		/*
		 * If we have an object in the current CPU's filled bucket,
		 * chain the object to any previous objects and return if
		 * we've satisfied the number of requested objects.
		 */
		if (ccp->cc_objs > 0) {
			mcache_obj_t *tail;
			int objs;

			/*
			 * Objects in the bucket are already linked together
			 * with the most recently freed object at the head of
			 * the list; grab as many objects as we can.
			 */
			objs = MIN((unsigned int)ccp->cc_objs, need);
			*list = ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
			ccp->cc_objs -= objs;
			ccp->cc_alloc += objs;

			/*
			 * Terminate the chain at the last object taken, and
			 * continue chaining any further objects from there.
			 */
			tail = ccp->cc_filled->bkt_obj[ccp->cc_objs];
			list = &tail->obj_next;
			*list = NULL;

			/* If we got them all, return to caller */
			if ((need -= objs) == 0) {
				MCACHE_UNLOCK(&ccp->cc_lock);

				if (!(cp->mc_flags & MCF_NOLEAKLOG) &&
				    cp->mc_slab_log != NULL) {
					(*cp->mc_slab_log)(num, *top, TRUE);
				}

				if (cp->mc_flags & MCF_DEBUG) {
					goto debug_alloc;
				}

				return num;
			}
		}

		/*
		 * The CPU's filled bucket is empty.  If the previous filled
		 * bucket was full, exchange and try again.
		 */
		if (ccp->cc_pobjs > 0) {
			mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
			continue;
		}

		/*
		 * If the bucket layer is disabled, allocate from slab.  This
		 * can happen either because MCF_NOCPUCACHE is set, or because
		 * the bucket layer is currently being resized.
		 */
		if (ccp->cc_bktsize == 0) {
			break;
		}

		/*
		 * Both of the CPU's buckets are empty; try to get full
		 * bucket(s) from the bucket layer.  Upon success, refill
		 * this CPU and place any empty bucket into the empty list.
		 * To prevent potential thrashing, replace both empty buckets
		 * only if the requested count exceeds a bucket's worth of
		 * objects.
		 */
		(void) mcache_bkt_batch_alloc(cp, &cp->mc_full,
		    &bkt, (need <= ccp->cc_bktsize) ? 1 : 2);
		if (bkt != NULL) {
			/* Buckets displaced here are returned below in one batch */
			mcache_bkt_t *bkt_list = NULL;

			if (ccp->cc_pfilled != NULL) {
				ccp->cc_pfilled->bkt_next = bkt_list;
				bkt_list = ccp->cc_pfilled;
			}
			if (bkt->bkt_next == NULL) {
				/*
				 * Bucket layer allocation returns only 1
				 * magazine; retain current empty magazine.
				 */
				mcache_cpu_refill(ccp, bkt, ccp->cc_bktsize);
			} else {
				/*
				 * We got 2 full buckets from the bucket
				 * layer; release the current empty bucket
				 * back to the bucket layer.
				 */
				if (ccp->cc_filled != NULL) {
					ccp->cc_filled->bkt_next = bkt_list;
					bkt_list = ccp->cc_filled;
				}
				mcache_cpu_batch_refill(ccp, bkt,
				    ccp->cc_bktsize);
			}
			mcache_bkt_batch_free(cp, &cp->mc_empty, bkt_list);
			continue;
		}

		/*
		 * The bucket layer has no full buckets; allocate the
		 * object(s) directly from the slab layer.
		 */
		break;
	}
	MCACHE_UNLOCK(&ccp->cc_lock);

	/* Get whatever is still needed straight from the slab layer */
	need -= (*cp->mc_slab_alloc)(cp->mc_private, &list, need, wait);

	/*
	 * If this is a blocking allocation, or if it is non-blocking and
	 * the cache's full bucket is non-empty, then retry the allocation.
	 */
	if (need > 0) {
		if (!(wait & MCR_NONBLOCKING)) {
			os_atomic_inc(&cp->mc_wretry_cnt, relaxed);
			goto retry_alloc;
		} else if ((wait & (MCR_NOSLEEP | MCR_TRYHARD)) &&
		    !mcache_bkt_isempty(cp)) {
			if (!nwretry) {
				nwretry = TRUE;
			}
			os_atomic_inc(&cp->mc_nwretry_cnt, relaxed);
			goto retry_alloc;
		} else if (nwretry) {
			/* Retried without waiting and still failed */
			os_atomic_inc(&cp->mc_nwfail_cnt, relaxed);
		}
	}

	if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) {
		(*cp->mc_slab_log)((num - need), *top, TRUE);
	}

	if (!(cp->mc_flags & MCF_DEBUG)) {
		return num - need;
	}

debug_alloc:
	if (cp->mc_flags & MCF_DEBUG) {
		mcache_obj_t **o = top;
		unsigned int n;

		n = 0;
		/*
		 * Verify that the chain of objects have the same count as
		 * what we are about to report to the caller.  Any mismatch
		 * here means that the object list is insanely broken and
		 * therefore we must panic.
		 */
		while (*o != NULL) {
			o = &(*o)->obj_next;
			++n;
		}
		if (n != (num - need)) {
			panic("mcache_alloc_ext: %s cp %p corrupted list "
			    "(got %d actual %d)\n", cp->mc_name,
			    (void *)cp, num - need, n);
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}

	/* Invoke the slab layer audit callback if auditing is enabled */
	if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) {
		(*cp->mc_slab_audit)(cp->mc_private, *top, TRUE);
	}

	return num - need;
}
615*43a90889SApple OSS Distributions 
616*43a90889SApple OSS Distributions /*
617*43a90889SApple OSS Distributions  * Allocate a single object from a cache.
618*43a90889SApple OSS Distributions  */
619*43a90889SApple OSS Distributions __private_extern__ void *
mcache_alloc(mcache_t * cp,int wait)620*43a90889SApple OSS Distributions mcache_alloc(mcache_t *cp, int wait)
621*43a90889SApple OSS Distributions {
622*43a90889SApple OSS Distributions 	mcache_obj_t *buf;
623*43a90889SApple OSS Distributions 
624*43a90889SApple OSS Distributions 	(void) mcache_alloc_ext(cp, &buf, 1, wait);
625*43a90889SApple OSS Distributions 	return buf;
626*43a90889SApple OSS Distributions }
627*43a90889SApple OSS Distributions 
/*
 * Register a waiter on the cache (atomic; no lock required).
 */
__private_extern__ void
mcache_waiter_inc(mcache_t *cp)
{
	os_atomic_inc(&cp->mc_waiter_cnt, relaxed);
}
633*43a90889SApple OSS Distributions 
/*
 * Deregister a waiter from the cache (atomic; no lock required).
 */
__private_extern__ void
mcache_waiter_dec(mcache_t *cp)
{
	os_atomic_dec(&cp->mc_waiter_cnt, relaxed);
}
639*43a90889SApple OSS Distributions 
/*
 * Return TRUE if the cache's full-bucket list appears empty.
 */
__private_extern__ boolean_t
mcache_bkt_isempty(mcache_t *cp)
{
	/*
	 * This isn't meant to accurately tell whether there are
	 * any full buckets in the cache; it is simply a way to
	 * obtain "hints" about the state of the cache.
	 */
	return cp->mc_full.bl_total == 0;
}
650*43a90889SApple OSS Distributions 
651*43a90889SApple OSS Distributions /*
652*43a90889SApple OSS Distributions  * Notify the slab layer about an event.
653*43a90889SApple OSS Distributions  */
static void
mcache_notify(mcache_t *cp, u_int32_t event)
{
	/* The notify callback is optional; silently skip if unset */
	if (cp->mc_slab_notify != NULL) {
		(*cp->mc_slab_notify)(cp->mc_private, event);
	}
}
661*43a90889SApple OSS Distributions 
662*43a90889SApple OSS Distributions /*
663*43a90889SApple OSS Distributions  * Purge the cache and disable its buckets.
664*43a90889SApple OSS Distributions  */
static void
mcache_purge(void *arg)
{
	mcache_t *cp = arg;	/* void * because this runs via mcache_dispatch() */

	/* Drain the CPU/bucket layers; this also disables the buckets */
	mcache_bkt_purge(cp);
	/*
	 * We cannot simply call mcache_cache_bkt_enable() from here as
	 * a bucket resize may be in flight and we would cause the CPU
	 * layers of the cache to point to different sizes.  Therefore,
	 * we simply increment the enable count so that during the next
	 * periodic cache update the buckets can be reenabled.
	 */
	lck_mtx_lock_spin(&cp->mc_sync_lock);
	cp->mc_enable_cnt++;
	lck_mtx_unlock(&cp->mc_sync_lock);
}
682*43a90889SApple OSS Distributions 
683*43a90889SApple OSS Distributions __private_extern__ boolean_t
mcache_purge_cache(mcache_t * cp,boolean_t async)684*43a90889SApple OSS Distributions mcache_purge_cache(mcache_t *cp, boolean_t async)
685*43a90889SApple OSS Distributions {
686*43a90889SApple OSS Distributions 	/*
687*43a90889SApple OSS Distributions 	 * Purging a cache that has no per-CPU caches or is already
688*43a90889SApple OSS Distributions 	 * in the process of being purged is rather pointless.
689*43a90889SApple OSS Distributions 	 */
690*43a90889SApple OSS Distributions 	if (cp->mc_flags & MCF_NOCPUCACHE) {
691*43a90889SApple OSS Distributions 		return FALSE;
692*43a90889SApple OSS Distributions 	}
693*43a90889SApple OSS Distributions 
694*43a90889SApple OSS Distributions 	lck_mtx_lock_spin(&cp->mc_sync_lock);
695*43a90889SApple OSS Distributions 	if (cp->mc_purge_cnt > 0) {
696*43a90889SApple OSS Distributions 		lck_mtx_unlock(&cp->mc_sync_lock);
697*43a90889SApple OSS Distributions 		return FALSE;
698*43a90889SApple OSS Distributions 	}
699*43a90889SApple OSS Distributions 	cp->mc_purge_cnt++;
700*43a90889SApple OSS Distributions 	lck_mtx_unlock(&cp->mc_sync_lock);
701*43a90889SApple OSS Distributions 
702*43a90889SApple OSS Distributions 	if (async) {
703*43a90889SApple OSS Distributions 		mcache_dispatch(mcache_purge, cp);
704*43a90889SApple OSS Distributions 	} else {
705*43a90889SApple OSS Distributions 		mcache_purge(cp);
706*43a90889SApple OSS Distributions 	}
707*43a90889SApple OSS Distributions 
708*43a90889SApple OSS Distributions 	return TRUE;
709*43a90889SApple OSS Distributions }
710*43a90889SApple OSS Distributions 
711*43a90889SApple OSS Distributions /*
712*43a90889SApple OSS Distributions  * Free a single object to a cache.
713*43a90889SApple OSS Distributions  */
714*43a90889SApple OSS Distributions __private_extern__ void
mcache_free(mcache_t * cp,void * buf)715*43a90889SApple OSS Distributions mcache_free(mcache_t *cp, void *buf)
716*43a90889SApple OSS Distributions {
717*43a90889SApple OSS Distributions 	((mcache_obj_t *)buf)->obj_next = NULL;
718*43a90889SApple OSS Distributions 	mcache_free_ext(cp, (mcache_obj_t *)buf);
719*43a90889SApple OSS Distributions }
720*43a90889SApple OSS Distributions 
721*43a90889SApple OSS Distributions /*
722*43a90889SApple OSS Distributions  * Free one or more objects to a cache.
723*43a90889SApple OSS Distributions  */
__private_extern__ void
mcache_free_ext(mcache_t *cp, mcache_obj_t *list)
{
	mcache_cpu_t *ccp = MCACHE_CPU(cp);
	mcache_bkttype_t *btp;
	mcache_obj_t *nlist;	/* next object while unchaining 'list' */
	mcache_bkt_t *bkt;

	if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) {
		(*cp->mc_slab_log)(0, list, FALSE);
	}

	/* Invoke the slab layer audit callback if auditing is enabled */
	if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) {
		(*cp->mc_slab_audit)(cp->mc_private, list, FALSE);
	}

	MCACHE_LOCK(&ccp->cc_lock);
	for (;;) {
		/*
		 * If there is space in the current CPU's filled bucket, put
		 * the object there and return once all objects are freed.
		 * Note the cast to unsigned integer takes care of the case
		 * where the bucket layer is disabled (when cc_objs is -1).
		 */
		if ((unsigned int)ccp->cc_objs <
		    (unsigned int)ccp->cc_bktsize) {
			/*
			 * Reverse the list while we place the object into the
			 * bucket; this effectively causes the most recently
			 * freed object(s) to be reused during allocation.
			 */
			nlist = list->obj_next;
			list->obj_next = (ccp->cc_objs == 0) ? NULL :
			    ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
			ccp->cc_filled->bkt_obj[ccp->cc_objs++] = list;
			ccp->cc_free++;

			if ((list = nlist) != NULL) {
				continue;
			}

			/* We are done; return to caller */
			MCACHE_UNLOCK(&ccp->cc_lock);

			/* If there is a waiter below, notify it */
			if (cp->mc_waiter_cnt > 0) {
				mcache_notify(cp, MCN_RETRYALLOC);
			}
			return;
		}

		/*
		 * The CPU's filled bucket is full.  If the previous filled
		 * bucket was empty, exchange and try again.
		 */
		if (ccp->cc_pobjs == 0) {
			mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
			continue;
		}

		/*
		 * If the bucket layer is disabled, free to slab.  This can
		 * happen either because MCF_NOCPUCACHE is set, or because
		 * the bucket layer is currently being resized.
		 */
		if (ccp->cc_bktsize == 0) {
			break;
		}

		/*
		 * Both of the CPU's buckets are full; try to get empty
		 * buckets from the bucket layer.  Upon success, empty this
		 * CPU and place any full bucket into the full list.
		 *
		 * TODO: Because the caller currently doesn't indicate
		 * the number of objects in the list, we choose the more
		 * conservative approach of allocating only 1 empty
		 * bucket (to prevent potential thrashing).  Once we
		 * have the object count, we can replace 1 with similar
		 * logic as used in mcache_alloc_ext().
		 */
		(void) mcache_bkt_batch_alloc(cp, &cp->mc_empty, &bkt, 1);
		if (bkt != NULL) {
			/* Buckets displaced here are returned below in one batch */
			mcache_bkt_t *bkt_list = NULL;

			if (ccp->cc_pfilled != NULL) {
				ccp->cc_pfilled->bkt_next = bkt_list;
				bkt_list = ccp->cc_pfilled;
			}
			if (bkt->bkt_next == NULL) {
				/*
				 * Bucket layer allocation returns only 1
				 * bucket; retain current full bucket.
				 */
				mcache_cpu_refill(ccp, bkt, 0);
			} else {
				/*
				 * We got 2 empty buckets from the bucket
				 * layer; release the current full bucket
				 * back to the bucket layer.
				 */
				if (ccp->cc_filled != NULL) {
					ccp->cc_filled->bkt_next = bkt_list;
					bkt_list = ccp->cc_filled;
				}
				mcache_cpu_batch_refill(ccp, bkt, 0);
			}
			mcache_bkt_batch_free(cp, &cp->mc_full, bkt_list);
			continue;
		}
		btp = cp->cache_bkttype;

		/*
		 * We need an empty bucket to put our freed objects into
		 * but couldn't get an empty bucket from the bucket layer;
		 * attempt to allocate one.  We do not want to block for
		 * allocation here, and if the bucket allocation fails
		 * we will simply fall through to the slab layer.
		 */
		MCACHE_UNLOCK(&ccp->cc_lock);
		bkt = mcache_alloc(btp->bt_cache, MCR_NOSLEEP);
		MCACHE_LOCK(&ccp->cc_lock);

		if (bkt != NULL) {
			/*
			 * We have an empty bucket, but since we drop the
			 * CPU lock above, the cache's bucket size may have
			 * changed.  If so, free the bucket and try again.
			 */
			if (ccp->cc_bktsize != btp->bt_bktsize) {
				MCACHE_UNLOCK(&ccp->cc_lock);
				mcache_free(btp->bt_cache, bkt);
				MCACHE_LOCK(&ccp->cc_lock);
				continue;
			}

			/*
			 * Store it in the bucket object since we'll
			 * need to refer to it during bucket destroy;
			 * we can't safely refer to cache_bkttype as
			 * the bucket lock may not be acquired then.
			 */
			bkt->bkt_type = btp;

			/*
			 * We have an empty bucket of the right size;
			 * add it to the bucket layer and try again.
			 */
			ASSERT(bkt->bkt_next == NULL);
			mcache_bkt_batch_free(cp, &cp->mc_empty, bkt);
			continue;
		}

		/*
		 * The bucket layer has no empty buckets; free the
		 * object(s) directly to the slab layer.
		 */
		break;
	}
	MCACHE_UNLOCK(&ccp->cc_lock);

	/* If there is a waiter below, notify it */
	if (cp->mc_waiter_cnt > 0) {
		mcache_notify(cp, MCN_RETRYALLOC);
	}

	/* Advise the slab layer to purge the object(s) */
	(*cp->mc_slab_free)(cp->mc_private, list,
	    (cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
}
895*43a90889SApple OSS Distributions 
896*43a90889SApple OSS Distributions /*
897*43a90889SApple OSS Distributions  * Cache destruction routine.
898*43a90889SApple OSS Distributions  */
__private_extern__ void
mcache_destroy(mcache_t *cp)
{
	void **pbuf;

	/* Unlink from the global cache list first so no one else finds us */
	MCACHE_LIST_LOCK();
	LIST_REMOVE(cp, mc_list);
	MCACHE_LIST_UNLOCK();

	/* Drain all CPU and bucket layers back to the slab */
	mcache_bkt_purge(cp);

	/*
	 * This cache is dead; there should be no further transaction.
	 * If it's still invoked, make sure that it induces a fault.
	 */
	cp->mc_slab_alloc = NULL;
	cp->mc_slab_free = NULL;
	cp->mc_slab_audit = NULL;

	lck_grp_free(cp->mc_bkt_lock_grp);
	lck_grp_free(cp->mc_cpu_lock_grp);
	lck_grp_free(cp->mc_sync_lock_grp);

	/*
	 * TODO: We need to destroy the zone here, but cannot do it
	 * because there is no such way to achieve that.  Until then
	 * the memory allocated for the zone structure is leaked.
	 * Once it is achievable, uncomment these lines:
	 *
	 *	if (cp->mc_slab_zone != NULL) {
	 *		zdestroy(cp->mc_slab_zone);
	 *		cp->mc_slab_zone = NULL;
	 *	}
	 */

	/* Get the original address since we're about to free it */
	pbuf = (void **)((intptr_t)cp - sizeof(void *));

	zfree(mcache_zone, *pbuf);
}
939*43a90889SApple OSS Distributions 
/*
 * Internal slab allocator used as a backend for simple caches.  The current
 * implementation uses the zone allocator for simplicity reasons.
 *
 * Allocates up to "num" objects from cp's backing zone and chains them
 * onto the caller's list at *plist; returns the number of objects
 * actually allocated.  Since the zone is entered with Z_WAITOK |
 * Z_NOFAIL, every iteration succeeds and the return value is always
 * "num" (hence "wait" is unused).
 */
static unsigned int
mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num,
    int wait)
{
#pragma unused(wait)
	mcache_t *cp = arg;
	unsigned int need = num;
	/* Buffer length rounded to a 64-bit multiple for pattern fill below */
	size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
	u_int32_t flags = cp->mc_flags;
	void *buf, *base, **pbuf;
	mcache_obj_t **list = *plist;

	*list = NULL;

	for (;;) {
		/* Z_NOFAIL: this allocation never returns NULL */
		buf = zalloc_flags(cp->mc_slab_zone, Z_WAITOK | Z_NOFAIL);

		/*
		 * Get the aligned base address for this object; offsetting
		 * by sizeof(u_int64_t) before rounding guarantees room to
		 * store a backpointer just below the aligned base.
		 */
		base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t),
		    cp->mc_align);

		/*
		 * Wind back a pointer size from the aligned base and
		 * save the original address so we can free it later.
		 */
		pbuf = (void **)((intptr_t)base - sizeof(void *));
		*pbuf = buf;

		/* The aligned object must fit within the zone chunk */
		VERIFY(((intptr_t)base + cp->mc_bufsize) <=
		    ((intptr_t)buf + cp->mc_chunksize));

		/*
		 * If auditing is enabled, patternize the contents of
		 * the buffer starting from the 64-bit aligned base to
		 * the end of the buffer; the length is rounded up to
		 * the nearest 64-bit multiply; this is because we use
		 * 64-bit memory access to set/check the pattern.
		 */
		if (flags & MCF_DEBUG) {
			VERIFY(((intptr_t)base + rsize) <=
			    ((intptr_t)buf + cp->mc_chunksize));
			mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
		}

		VERIFY(IS_P2ALIGNED(base, cp->mc_align));
		*list = (mcache_obj_t *)base;

		/* Append to the caller's chain and advance the tail pointer */
		(*list)->obj_next = NULL;
		list = *plist = &(*list)->obj_next;

		/* If we got them all, return to mcache */
		if (--need == 0) {
			break;
		}
	}

	return num - need;
}
1002*43a90889SApple OSS Distributions 
/*
 * Internal slab deallocator used as a backend for simple caches.
 *
 * Walks the chain of objects in "list"; for each object it recovers the
 * original (unaligned) zone address from the backpointer stored one
 * pointer below the aligned base, and returns that address to the
 * backing zone.
 */
static void
mcache_slab_free(void *arg, mcache_obj_t *list, __unused boolean_t purged)
{
	mcache_t *cp = arg;
	mcache_obj_t *nlist;
	/* Buffer length rounded to a 64-bit multiple for pattern verification */
	size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
	u_int32_t flags = cp->mc_flags;
	void *base;
	void **pbuf;

	for (;;) {
		/* Detach the object from the chain before freeing it */
		nlist = list->obj_next;
		list->obj_next = NULL;

		base = list;
		VERIFY(IS_P2ALIGNED(base, cp->mc_align));

		/* Get the original address since we're about to free it */
		pbuf = (void **)((intptr_t)base - sizeof(void *));

		VERIFY(((intptr_t)base + cp->mc_bufsize) <=
		    ((intptr_t)*pbuf + cp->mc_chunksize));

		if (flags & MCF_DEBUG) {
			VERIFY(((intptr_t)base + rsize) <=
			    ((intptr_t)*pbuf + cp->mc_chunksize));
			/* Verify the free pattern laid down at alloc time */
			mcache_audit_free_verify(NULL, base, 0, rsize);
		}

		/* Free it to zone */
		zfree(cp->mc_slab_zone, *pbuf);

		/* No more objects to free; return to mcache */
		if ((list = nlist) == NULL) {
			break;
		}
	}
}
1044*43a90889SApple OSS Distributions 
/*
 * Internal slab auditor for simple caches.
 *
 * On free (!alloc) each object is filled with MCACHE_FREE_PATTERN; on
 * alloc the pattern is checked (and, per the helper's name, rewritten)
 * to detect modifications made while the object sat on the free list.
 */
static void
mcache_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
{
	mcache_t *cp = arg;
	size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
	void *base, **pbuf;

	while (list != NULL) {
		mcache_obj_t *next = list->obj_next;

		base = list;
		VERIFY(IS_P2ALIGNED(base, cp->mc_align));

		/* Get the original address */
		pbuf = (void **)((intptr_t)base - sizeof(void *));

		VERIFY(((intptr_t)base + rsize) <=
		    ((intptr_t)*pbuf + cp->mc_chunksize));

		if (!alloc) {
			mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
		} else {
			mcache_audit_free_verify_set(NULL, base, 0, rsize);
		}

		/*
		 * Patternizing above overwrote the object, including its
		 * obj_next field; restore the link and advance.
		 */
		list = list->obj_next = next;
	}
}
1076*43a90889SApple OSS Distributions 
/*
 * Refill the CPU's buckets with bkt and its follower (if any).
 *
 * bkt is a chain of at most two full buckets: the first becomes the
 * CPU's filled bucket and the second (if present) the previously
 * filled one.  NOTE(review): callers presumably hold ccp->cc_lock
 * while mutating this per-CPU state — confirm at the call sites.
 */
static void
mcache_cpu_batch_refill(mcache_cpu_t *ccp, mcache_bkt_t *bkt, int objs)
{
	/* Either no filled bucket, or filled + incoming make a full bucket */
	ASSERT((ccp->cc_filled == NULL && ccp->cc_objs == -1) ||
	    (ccp->cc_filled && ccp->cc_objs + objs == ccp->cc_bktsize));
	ASSERT(ccp->cc_bktsize > 0);

	ccp->cc_filled = bkt;
	ccp->cc_objs = objs;
	if (__probable(bkt->bkt_next != NULL)) {
		/* Detach the follower and install it as the previous bucket */
		ccp->cc_pfilled = bkt->bkt_next;
		ccp->cc_pobjs = objs;
		bkt->bkt_next = NULL;
	} else {
		ASSERT(bkt->bkt_next == NULL);
		/* Single bucket supplied; no previous bucket available */
		ccp->cc_pfilled = NULL;
		ccp->cc_pobjs = -1;
	}
}
1099*43a90889SApple OSS Distributions 
1100*43a90889SApple OSS Distributions /*
1101*43a90889SApple OSS Distributions  * Refill the CPU's filled bucket with bkt and save the previous one.
1102*43a90889SApple OSS Distributions  */
1103*43a90889SApple OSS Distributions static void
mcache_cpu_refill(mcache_cpu_t * ccp,mcache_bkt_t * bkt,int objs)1104*43a90889SApple OSS Distributions mcache_cpu_refill(mcache_cpu_t *ccp, mcache_bkt_t *bkt, int objs)
1105*43a90889SApple OSS Distributions {
1106*43a90889SApple OSS Distributions 	ASSERT((ccp->cc_filled == NULL && ccp->cc_objs == -1) ||
1107*43a90889SApple OSS Distributions 	    (ccp->cc_filled && ccp->cc_objs + objs == ccp->cc_bktsize));
1108*43a90889SApple OSS Distributions 	ASSERT(ccp->cc_bktsize > 0);
1109*43a90889SApple OSS Distributions 
1110*43a90889SApple OSS Distributions 	ccp->cc_pfilled = ccp->cc_filled;
1111*43a90889SApple OSS Distributions 	ccp->cc_pobjs = ccp->cc_objs;
1112*43a90889SApple OSS Distributions 	ccp->cc_filled = bkt;
1113*43a90889SApple OSS Distributions 	ccp->cc_objs = objs;
1114*43a90889SApple OSS Distributions }
1115*43a90889SApple OSS Distributions 
/*
 * Get one or more buckets from the bucket layer.
 *
 * Transfers up to "num" buckets from blp's free list onto *list
 * (reversing their order in the process) and returns the number
 * actually obtained, which may be zero if the list is empty.
 */
static uint32_t
mcache_bkt_batch_alloc(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkt_t **list,
    uint32_t num)
{
	mcache_bkt_t *bkt_list = NULL;
	mcache_bkt_t *bkt;
	uint32_t need = num;

	ASSERT(list != NULL && need > 0);

	if (!MCACHE_LOCK_TRY(&cp->mc_bkt_lock)) {
		/*
		 * The bucket layer lock is held by another CPU; increase
		 * the contention count so that we can later resize the
		 * bucket size accordingly.
		 */
		MCACHE_LOCK(&cp->mc_bkt_lock);
		cp->mc_bkt_contention++;
	}

	while ((bkt = blp->bl_list) != NULL) {
		/* Pop from the layer's list and push onto the result list */
		blp->bl_list = bkt->bkt_next;
		bkt->bkt_next = bkt_list;
		bkt_list = bkt;
		/* Track the low-water mark used by the working set reaper */
		if (--blp->bl_total < blp->bl_min) {
			blp->bl_min = blp->bl_total;
		}
		blp->bl_alloc++;
		if (--need == 0) {
			break;
		}
	}

	MCACHE_UNLOCK(&cp->mc_bkt_lock);

	*list = bkt_list;

	return num - need;
}
1158*43a90889SApple OSS Distributions 
1159*43a90889SApple OSS Distributions /*
1160*43a90889SApple OSS Distributions  * Return one or more buckets to the bucket layer.
1161*43a90889SApple OSS Distributions  */
1162*43a90889SApple OSS Distributions static void
mcache_bkt_batch_free(mcache_t * cp,mcache_bktlist_t * blp,mcache_bkt_t * bkt)1163*43a90889SApple OSS Distributions mcache_bkt_batch_free(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkt_t *bkt)
1164*43a90889SApple OSS Distributions {
1165*43a90889SApple OSS Distributions 	mcache_bkt_t *nbkt;
1166*43a90889SApple OSS Distributions 
1167*43a90889SApple OSS Distributions 	MCACHE_LOCK(&cp->mc_bkt_lock);
1168*43a90889SApple OSS Distributions 	while (bkt != NULL) {
1169*43a90889SApple OSS Distributions 		nbkt = bkt->bkt_next;
1170*43a90889SApple OSS Distributions 		bkt->bkt_next = blp->bl_list;
1171*43a90889SApple OSS Distributions 		blp->bl_list = bkt;
1172*43a90889SApple OSS Distributions 		blp->bl_total++;
1173*43a90889SApple OSS Distributions 		bkt = nbkt;
1174*43a90889SApple OSS Distributions 	}
1175*43a90889SApple OSS Distributions 	MCACHE_UNLOCK(&cp->mc_bkt_lock);
1176*43a90889SApple OSS Distributions }
1177*43a90889SApple OSS Distributions 
/*
 * Enable the bucket layer of a cache.
 *
 * Publishing a non-zero cc_bktsize on each per-CPU cache (re)enables
 * the CPU bucket layer; this is a no-op for caches created with
 * MCF_NOCPUCACHE.
 */
static void
mcache_cache_bkt_enable(mcache_t *cp)
{
	mcache_cpu_t *ccp;
	unsigned int cpu;

	if (cp->mc_flags & MCF_NOCPUCACHE) {
		return;
	}

	for (cpu = 0; cpu < ncpu; cpu++) {
		ccp = &cp->mc_cpu[cpu];
		MCACHE_LOCK(&ccp->cc_lock);
		/* Take the bucket size from the cache's current bucket type */
		ccp->cc_bktsize = cp->cache_bkttype->bt_bktsize;
		MCACHE_UNLOCK(&ccp->cc_lock);
	}
}
1198*43a90889SApple OSS Distributions 
/*
 * Purge all buckets from a cache and disable its bucket layer.
 *
 * For each CPU: detach both the filled and previously-filled buckets
 * under the CPU lock, zero cc_bktsize to disable the CPU layer, then
 * destroy the detached buckets outside the lock.  Finally collapse the
 * bucket layer's working set and reap everything on the free lists.
 */
static void
mcache_bkt_purge(mcache_t *cp)
{
	mcache_cpu_t *ccp;
	mcache_bkt_t *bp, *pbp;
	int objs, pobjs;
	unsigned int cpu;

	for (cpu = 0; cpu < ncpu; cpu++) {
		ccp = &cp->mc_cpu[cpu];

		MCACHE_LOCK(&ccp->cc_lock);

		/* Snapshot and clear the CPU's buckets while holding the lock */
		bp = ccp->cc_filled;
		pbp = ccp->cc_pfilled;
		objs = ccp->cc_objs;
		pobjs = ccp->cc_pobjs;
		ccp->cc_filled = NULL;
		ccp->cc_pfilled = NULL;
		ccp->cc_objs = -1;
		ccp->cc_pobjs = -1;
		/* A zero bucket size disables this CPU's bucket layer */
		ccp->cc_bktsize = 0;

		MCACHE_UNLOCK(&ccp->cc_lock);

		/* Destroy the detached buckets without holding the CPU lock */
		if (bp != NULL) {
			mcache_bkt_destroy(cp, bp, objs);
		}
		if (pbp != NULL) {
			mcache_bkt_destroy(cp, pbp, pobjs);
		}
	}

	/* Make everything in the bucket layer eligible for reaping, then reap */
	mcache_bkt_ws_zero(cp);
	mcache_bkt_ws_reap(cp);
}
1238*43a90889SApple OSS Distributions 
/*
 * Free one or more objects in the bucket to the slab layer,
 * and also free the bucket itself.
 *
 * "nobjs" is the number of objects currently held by the bucket; the
 * object chain starts at bkt_obj[nobjs - 1].  An empty bucket
 * (nobjs == 0) only has its metadata freed.
 */
static void
mcache_bkt_destroy(mcache_t *cp, mcache_bkt_t *bkt, int nobjs)
{
	if (nobjs > 0) {
		mcache_obj_t *top = bkt->bkt_obj[nobjs - 1];

		if (cp->mc_flags & MCF_DEBUG) {
			mcache_obj_t *o = top;
			int cnt = 0;

			/*
			 * Verify that the chain of objects in the bucket is
			 * valid.  Any mismatch here means a mistake when the
			 * object(s) were freed to the CPU layer, so we panic.
			 */
			while (o != NULL) {
				o = o->obj_next;
				++cnt;
			}
			if (cnt != nobjs) {
				panic("mcache_bkt_destroy: %s cp %p corrupted "
				    "list in bkt %p (nobjs %d actual %d)\n",
				    cp->mc_name, (void *)cp, (void *)bkt,
				    nobjs, cnt);
				/* NOTREACHED */
				__builtin_unreachable();
			}
		}

		/* Advise the slab layer to purge the object(s) */
		(*cp->mc_slab_free)(cp->mc_private, top,
		    (cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
	}
	/* Return the bucket metadata to its own (bucket-type) cache */
	mcache_free(bkt->bkt_type->bt_cache, bkt);
}
1278*43a90889SApple OSS Distributions 
/*
 * Update the bucket layer working set statistics.
 *
 * The low-water mark (bl_min) observed over the last interval becomes
 * the reap limit for the next one — i.e. only buckets that went unused
 * during the whole interval are eligible for reaping — and bl_min is
 * reset to the current total for the new interval.
 */
static void
mcache_bkt_ws_update(mcache_t *cp)
{
	MCACHE_LOCK(&cp->mc_bkt_lock);

	cp->mc_full.bl_reaplimit = cp->mc_full.bl_min;
	cp->mc_full.bl_min = cp->mc_full.bl_total;
	cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_min;
	cp->mc_empty.bl_min = cp->mc_empty.bl_total;

	MCACHE_UNLOCK(&cp->mc_bkt_lock);
}
1294*43a90889SApple OSS Distributions 
/*
 * Mark everything as eligible for reaping (working set is zero).
 *
 * Setting both the reap limit and the low-water mark to the current
 * totals lets a subsequent mcache_bkt_ws_reap() drain the full and
 * empty bucket lists entirely.
 */
static void
mcache_bkt_ws_zero(mcache_t *cp)
{
	MCACHE_LOCK(&cp->mc_bkt_lock);

	cp->mc_full.bl_reaplimit = cp->mc_full.bl_total;
	cp->mc_full.bl_min = cp->mc_full.bl_total;
	cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_total;
	cp->mc_empty.bl_min = cp->mc_empty.bl_total;

	MCACHE_UNLOCK(&cp->mc_bkt_lock);
}
1310*43a90889SApple OSS Distributions 
1311*43a90889SApple OSS Distributions /*
1312*43a90889SApple OSS Distributions  * Reap all buckets that are beyond the working set.
1313*43a90889SApple OSS Distributions  */
1314*43a90889SApple OSS Distributions static void
mcache_bkt_ws_reap(mcache_t * cp)1315*43a90889SApple OSS Distributions mcache_bkt_ws_reap(mcache_t *cp)
1316*43a90889SApple OSS Distributions {
1317*43a90889SApple OSS Distributions 	mcache_bkt_t *bkt, *nbkt;
1318*43a90889SApple OSS Distributions 	uint32_t reap;
1319*43a90889SApple OSS Distributions 
1320*43a90889SApple OSS Distributions 	reap = MIN(cp->mc_full.bl_reaplimit, cp->mc_full.bl_min);
1321*43a90889SApple OSS Distributions 	if (reap != 0) {
1322*43a90889SApple OSS Distributions 		(void) mcache_bkt_batch_alloc(cp, &cp->mc_full, &bkt, reap);
1323*43a90889SApple OSS Distributions 		while (bkt != NULL) {
1324*43a90889SApple OSS Distributions 			nbkt = bkt->bkt_next;
1325*43a90889SApple OSS Distributions 			bkt->bkt_next = NULL;
1326*43a90889SApple OSS Distributions 			mcache_bkt_destroy(cp, bkt, bkt->bkt_type->bt_bktsize);
1327*43a90889SApple OSS Distributions 			bkt = nbkt;
1328*43a90889SApple OSS Distributions 		}
1329*43a90889SApple OSS Distributions 	}
1330*43a90889SApple OSS Distributions 
1331*43a90889SApple OSS Distributions 	reap = MIN(cp->mc_empty.bl_reaplimit, cp->mc_empty.bl_min);
1332*43a90889SApple OSS Distributions 	if (reap != 0) {
1333*43a90889SApple OSS Distributions 		(void) mcache_bkt_batch_alloc(cp, &cp->mc_empty, &bkt, reap);
1334*43a90889SApple OSS Distributions 		while (bkt != NULL) {
1335*43a90889SApple OSS Distributions 			nbkt = bkt->bkt_next;
1336*43a90889SApple OSS Distributions 			bkt->bkt_next = NULL;
1337*43a90889SApple OSS Distributions 			mcache_bkt_destroy(cp, bkt, 0);
1338*43a90889SApple OSS Distributions 			bkt = nbkt;
1339*43a90889SApple OSS Distributions 		}
1340*43a90889SApple OSS Distributions 	}
1341*43a90889SApple OSS Distributions }
1342*43a90889SApple OSS Distributions 
/*
 * Thread-call callback that re-opens the reap gate: clearing the flag
 * allows the next mcache_reap() request to dispatch another reap pass.
 */
static void
mcache_reap_timeout(thread_call_param_t dummy __unused,
    thread_call_param_t arg)
{
	volatile UInt32 *flag = arg;

	ASSERT(flag == &mcache_reaping);

	*flag = 0;
}
1353*43a90889SApple OSS Distributions 
/*
 * Called when a reap pass finishes; schedules mcache_reap_timeout to
 * clear the reap gate after mcache_reap_interval seconds (with leeway),
 * rate-limiting how often global reaps may run.
 */
static void
mcache_reap_done(void *flag)
{
	uint64_t deadline, leeway;

	clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
	    &deadline);
	clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
	    NSEC_PER_SEC, &leeway);
	thread_call_enter_delayed_with_leeway(mcache_reap_tcall, flag,
	    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
}
1366*43a90889SApple OSS Distributions 
/*
 * Dispatched worker for mcache_reap(): runs a reap pass over every
 * cache, then arranges (via mcache_reap_done) for the gate flag to be
 * cleared after the rate-limit interval.
 */
static void
mcache_reap_start(void *arg)
{
	UInt32 *flag = arg;

	ASSERT(flag == &mcache_reaping);

	mcache_applyall(mcache_cache_reap);
	mcache_dispatch(mcache_reap_done, flag);
}
1377*43a90889SApple OSS Distributions 
/*
 * Request an asynchronous reap of all caches.
 *
 * The 0->1 compare-and-swap on mcache_reaping ensures at most one reap
 * is in flight; the flag stays set until mcache_reap_timeout clears it.
 * The request is also skipped if the calling thread holds the global
 * mcache list lock (presumably to avoid self-deadlock — see
 * mcache_applyall's locking).
 */
__private_extern__ void
mcache_reap(void)
{
	UInt32 *flag = &mcache_reaping;

	if (mcache_llock_owner == current_thread() ||
	    !OSCompareAndSwap(0, 1, flag)) {
		return;
	}

	mcache_dispatch(mcache_reap_start, flag);
}
1390*43a90889SApple OSS Distributions 
1391*43a90889SApple OSS Distributions __private_extern__ void
mcache_reap_now(mcache_t * cp,boolean_t purge)1392*43a90889SApple OSS Distributions mcache_reap_now(mcache_t *cp, boolean_t purge)
1393*43a90889SApple OSS Distributions {
1394*43a90889SApple OSS Distributions 	if (purge) {
1395*43a90889SApple OSS Distributions 		mcache_bkt_purge(cp);
1396*43a90889SApple OSS Distributions 		mcache_cache_bkt_enable(cp);
1397*43a90889SApple OSS Distributions 	} else {
1398*43a90889SApple OSS Distributions 		mcache_bkt_ws_zero(cp);
1399*43a90889SApple OSS Distributions 		mcache_bkt_ws_reap(cp);
1400*43a90889SApple OSS Distributions 	}
1401*43a90889SApple OSS Distributions }
1402*43a90889SApple OSS Distributions 
/*
 * Per-cache reap callback; wraps mcache_bkt_ws_reap so it matches the
 * signature expected by mcache_applyall().
 */
static void
mcache_cache_reap(mcache_t *cp)
{
	mcache_bkt_ws_reap(cp);
}
1408*43a90889SApple OSS Distributions 
/*
 * Performs period maintenance on a cache.
 *
 * Updates the working set statistics, then decides between two mutually
 * exclusive follow-up actions dispatched asynchronously: growing the
 * bucket size (on sustained lock contention) or re-enabling a cache
 * that was previously purged.
 */
static void
mcache_cache_update(mcache_t *cp)
{
	int need_bkt_resize = 0;
	int need_bkt_reenable = 0;

	lck_mtx_assert(&mcache_llock, LCK_MTX_ASSERT_OWNED);

	mcache_bkt_ws_update(cp);

	/*
	 * Cache resize and post-purge reenable are mutually exclusive.
	 * If the cache was previously purged, there is no point of
	 * increasing the bucket size as there was an indication of
	 * memory pressure on the system.
	 */
	lck_mtx_lock_spin(&cp->mc_sync_lock);
	if (!(cp->mc_flags & MCF_NOCPUCACHE) && cp->mc_enable_cnt) {
		need_bkt_reenable = 1;
	}
	lck_mtx_unlock(&cp->mc_sync_lock);

	MCACHE_LOCK(&cp->mc_bkt_lock);
	/*
	 * If the contention count is greater than the threshold, and if
	 * we are not already at the maximum bucket size, increase it.
	 * Otherwise, if this cache was previously purged by the user
	 * then we simply reenable it.
	 */
	if ((unsigned int)cp->mc_chunksize < cp->cache_bkttype->bt_maxbuf &&
	    (int)(cp->mc_bkt_contention - cp->mc_bkt_contention_prev) >
	    mcache_bkt_contention && !need_bkt_reenable) {
		need_bkt_resize = 1;
	}

	/* Snapshot the contention count for the next interval's delta */
	cp->mc_bkt_contention_prev = cp->mc_bkt_contention;
	MCACHE_UNLOCK(&cp->mc_bkt_lock);

	/* Do the expensive work asynchronously, outside mcache_llock */
	if (need_bkt_resize) {
		mcache_dispatch(mcache_cache_bkt_resize, cp);
	} else if (need_bkt_reenable) {
		mcache_dispatch(mcache_cache_enable, cp);
	}
}
1456*43a90889SApple OSS Distributions 
/*
 * Recompute a cache's bucket size.  This is an expensive operation
 * and should not be done frequently; larger buckets provide for a
 * higher transfer rate with the bucket while smaller buckets reduce
 * the memory consumption.
 */
static void
mcache_cache_bkt_resize(void *arg)
{
	mcache_t *cp = arg;
	mcache_bkttype_t *btp = cp->cache_bkttype;

	if ((unsigned int)cp->mc_chunksize < btp->bt_maxbuf) {
		/* Drain all buckets of the old size before switching types */
		mcache_bkt_purge(cp);

		/*
		 * Upgrade to the next bucket type with larger bucket size;
		 * (++btp assumes cache_bkttype points into a table ordered
		 * by increasing bucket size — confirm against its definer);
		 * temporarily set the previous contention snapshot to a
		 * negative number to prevent unnecessary resize request.
		 */
		MCACHE_LOCK(&cp->mc_bkt_lock);
		cp->cache_bkttype = ++btp;
		cp->mc_bkt_contention_prev = cp->mc_bkt_contention + INT_MAX;
		MCACHE_UNLOCK(&cp->mc_bkt_lock);

		mcache_cache_enable(cp);
	}
}
1485*43a90889SApple OSS Distributions 
1486*43a90889SApple OSS Distributions /*
1487*43a90889SApple OSS Distributions  * Reenable a previously disabled cache due to purge.
1488*43a90889SApple OSS Distributions  */
1489*43a90889SApple OSS Distributions static void
mcache_cache_enable(void * arg)1490*43a90889SApple OSS Distributions mcache_cache_enable(void *arg)
1491*43a90889SApple OSS Distributions {
1492*43a90889SApple OSS Distributions 	mcache_t *cp = arg;
1493*43a90889SApple OSS Distributions 
1494*43a90889SApple OSS Distributions 	lck_mtx_lock_spin(&cp->mc_sync_lock);
1495*43a90889SApple OSS Distributions 	cp->mc_purge_cnt = 0;
1496*43a90889SApple OSS Distributions 	cp->mc_enable_cnt = 0;
1497*43a90889SApple OSS Distributions 	lck_mtx_unlock(&cp->mc_sync_lock);
1498*43a90889SApple OSS Distributions 
1499*43a90889SApple OSS Distributions 	mcache_cache_bkt_enable(cp);
1500*43a90889SApple OSS Distributions }
1501*43a90889SApple OSS Distributions 
1502*43a90889SApple OSS Distributions static void
mcache_update_timeout(__unused void * arg)1503*43a90889SApple OSS Distributions mcache_update_timeout(__unused void *arg)
1504*43a90889SApple OSS Distributions {
1505*43a90889SApple OSS Distributions 	uint64_t deadline, leeway;
1506*43a90889SApple OSS Distributions 
1507*43a90889SApple OSS Distributions 	clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
1508*43a90889SApple OSS Distributions 	    &deadline);
1509*43a90889SApple OSS Distributions 	clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
1510*43a90889SApple OSS Distributions 	    NSEC_PER_SEC, &leeway);
1511*43a90889SApple OSS Distributions 	thread_call_enter_delayed_with_leeway(mcache_update_tcall, NULL,
1512*43a90889SApple OSS Distributions 	    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
1513*43a90889SApple OSS Distributions }
1514*43a90889SApple OSS Distributions 
/*
 * Thread-call work function: apply the periodic update to every cache
 * on the global list, then re-arm the timer for the next interval.
 */
static void
mcache_update(thread_call_param_t arg __unused,
    thread_call_param_t dummy __unused)
{
	mcache_applyall(mcache_cache_update);
	mcache_update_timeout(NULL);
}
1522*43a90889SApple OSS Distributions 
1523*43a90889SApple OSS Distributions static void
mcache_applyall(void (* func)(mcache_t *))1524*43a90889SApple OSS Distributions mcache_applyall(void (*func)(mcache_t *))
1525*43a90889SApple OSS Distributions {
1526*43a90889SApple OSS Distributions 	mcache_t *cp;
1527*43a90889SApple OSS Distributions 
1528*43a90889SApple OSS Distributions 	MCACHE_LIST_LOCK();
1529*43a90889SApple OSS Distributions 	LIST_FOREACH(cp, &mcache_head, mc_list) {
1530*43a90889SApple OSS Distributions 		func(cp);
1531*43a90889SApple OSS Distributions 	}
1532*43a90889SApple OSS Distributions 	MCACHE_LIST_UNLOCK();
1533*43a90889SApple OSS Distributions }
1534*43a90889SApple OSS Distributions 
/*
 * Run func(arg) asynchronously via the timeout() callout facility.
 *
 * NOTE(review): hz / 1000 evaluates to 0 whenever hz < 1000 (hz is
 * commonly 100), so the callout is scheduled with a zero-tick delay --
 * presumably an intentional "as soon as possible" dispatch; confirm
 * against timeout() semantics before changing.
 */
static void
mcache_dispatch(void (*func)(void *), void *arg)
{
	ASSERT(func != NULL);
	timeout(func, arg, hz / 1000);
}
1541*43a90889SApple OSS Distributions 
/*
 * Record a transaction (alloc or free) in the audit record's circular
 * log: buffer address, owning cache, current thread, caller backtrace
 * and a millisecond timestamp relative to base_ts (when non-NULL).
 */
__private_extern__ void
mcache_buffer_log(mcache_audit_t *mca, void *addr, mcache_t *cp,
    struct timeval *base_ts)
{
	struct timeval now, base = { .tv_sec = 0, .tv_usec = 0 };
	void *stack[MCACHE_STACK_DEPTH + 1];
	struct mca_trn *transaction;

	/* Next free slot of the per-record transaction ring. */
	transaction = &mca->mca_trns[mca->mca_next_trn];

	mca->mca_addr = addr;
	mca->mca_cache = cp;

	transaction->mca_thread = current_thread();

	bzero(stack, sizeof(stack));
	/*
	 * Capture one extra frame, then drop frame 0 below so the
	 * logged stack begins at our caller rather than here.
	 */
	transaction->mca_depth = (uint16_t)OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
	bcopy(&stack[1], transaction->mca_stack,
	    sizeof(transaction->mca_stack));

	microuptime(&now);
	if (base_ts != NULL) {
		base = *base_ts;
	}
	/* tstamp is in ms relative to base_ts */
	transaction->mca_tstamp = ((now.tv_usec - base.tv_usec) / 1000);
	if ((now.tv_sec - base.tv_sec) > 0) {
		transaction->mca_tstamp += ((now.tv_sec - base.tv_sec) * 1000);
	}

	/* Advance the ring index; wraps after mca_trn_max entries. */
	mca->mca_next_trn =
	    (mca->mca_next_trn + 1) % mca_trn_max;
}
1575*43a90889SApple OSS Distributions 
1576*43a90889SApple OSS Distributions /*
1577*43a90889SApple OSS Distributions  * N.B.: mcache_set_pattern(), mcache_verify_pattern() and
1578*43a90889SApple OSS Distributions  * mcache_verify_set_pattern() are marked as noinline to prevent the
1579*43a90889SApple OSS Distributions  * compiler from aliasing pointers when they are inlined inside the callers
1580*43a90889SApple OSS Distributions  * (e.g. mcache_audit_free_verify_set()) which would be undefined behavior.
1581*43a90889SApple OSS Distributions  */
1582*43a90889SApple OSS Distributions __private_extern__ OS_NOINLINE void
mcache_set_pattern(u_int64_t pattern,void * buf_arg,size_t size)1583*43a90889SApple OSS Distributions mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size)
1584*43a90889SApple OSS Distributions {
1585*43a90889SApple OSS Distributions 	u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1586*43a90889SApple OSS Distributions 	u_int64_t *buf = (u_int64_t *)buf_arg;
1587*43a90889SApple OSS Distributions 
1588*43a90889SApple OSS Distributions 	VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1589*43a90889SApple OSS Distributions 	VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1590*43a90889SApple OSS Distributions 
1591*43a90889SApple OSS Distributions 	while (buf < buf_end) {
1592*43a90889SApple OSS Distributions 		*buf++ = pattern;
1593*43a90889SApple OSS Distributions 	}
1594*43a90889SApple OSS Distributions }
1595*43a90889SApple OSS Distributions 
1596*43a90889SApple OSS Distributions __private_extern__ OS_NOINLINE void *
mcache_verify_pattern(u_int64_t pattern,void * buf_arg,size_t size)1597*43a90889SApple OSS Distributions mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size)
1598*43a90889SApple OSS Distributions {
1599*43a90889SApple OSS Distributions 	u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1600*43a90889SApple OSS Distributions 	u_int64_t *buf;
1601*43a90889SApple OSS Distributions 
1602*43a90889SApple OSS Distributions 	VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1603*43a90889SApple OSS Distributions 	VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1604*43a90889SApple OSS Distributions 
1605*43a90889SApple OSS Distributions 	for (buf = buf_arg; buf < buf_end; buf++) {
1606*43a90889SApple OSS Distributions 		if (*buf != pattern) {
1607*43a90889SApple OSS Distributions 			return buf;
1608*43a90889SApple OSS Distributions 		}
1609*43a90889SApple OSS Distributions 	}
1610*43a90889SApple OSS Distributions 	return NULL;
1611*43a90889SApple OSS Distributions }
1612*43a90889SApple OSS Distributions 
1613*43a90889SApple OSS Distributions OS_NOINLINE static void *
mcache_verify_set_pattern(u_int64_t old,u_int64_t new,void * buf_arg,size_t size)1614*43a90889SApple OSS Distributions mcache_verify_set_pattern(u_int64_t old, u_int64_t new, void *buf_arg,
1615*43a90889SApple OSS Distributions     size_t size)
1616*43a90889SApple OSS Distributions {
1617*43a90889SApple OSS Distributions 	u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1618*43a90889SApple OSS Distributions 	u_int64_t *buf;
1619*43a90889SApple OSS Distributions 
1620*43a90889SApple OSS Distributions 	VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1621*43a90889SApple OSS Distributions 	VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1622*43a90889SApple OSS Distributions 
1623*43a90889SApple OSS Distributions 	for (buf = buf_arg; buf < buf_end; buf++) {
1624*43a90889SApple OSS Distributions 		if (*buf != old) {
1625*43a90889SApple OSS Distributions 			mcache_set_pattern(old, buf_arg,
1626*43a90889SApple OSS Distributions 			    (uintptr_t)buf - (uintptr_t)buf_arg);
1627*43a90889SApple OSS Distributions 			return buf;
1628*43a90889SApple OSS Distributions 		}
1629*43a90889SApple OSS Distributions 		*buf = new;
1630*43a90889SApple OSS Distributions 	}
1631*43a90889SApple OSS Distributions 	return NULL;
1632*43a90889SApple OSS Distributions }
1633*43a90889SApple OSS Distributions 
/*
 * Verify that a freed buffer still holds MCACHE_FREE_PATTERN over its
 * entire [base, base+size) range, panicking on the first modified word.
 * The word overlapping the freelist linkage (obj_next) is legitimately
 * overwritten while the object sits on the freelist, so it is saved,
 * temporarily stamped with the pattern for the scan, and restored.
 */
__private_extern__ void
mcache_audit_free_verify(mcache_audit_t *mca, void *base, size_t offset,
    size_t size)
{
	void *addr;
	u_int64_t *oaddr64;
	mcache_obj_t *next;

	addr = (void *)((uintptr_t)base + offset);
	/* Save the freelist linkage stored at the start of the object. */
	next = ((mcache_obj_t *)addr)->obj_next;

	/* For the "obj_next" pointer in the buffer */
	oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t));
	*oaddr64 = MCACHE_FREE_PATTERN;

	if ((oaddr64 = mcache_verify_pattern(MCACHE_FREE_PATTERN,
	    (caddr_t)base, size)) != NULL) {
		/* Use-after-free detected: report and panic. */
		mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
		    (int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
		/* NOTREACHED */
	}
	/* Restore the freelist linkage clobbered by the pattern above. */
	((mcache_obj_t *)addr)->obj_next = next;
}
1657*43a90889SApple OSS Distributions 
/*
 * Like mcache_audit_free_verify(), but additionally rewrite the range
 * with MCACHE_UNINITIALIZED_PATTERN as it is verified, preparing the
 * buffer for reallocation.  The word overlapping the freelist linkage
 * (obj_next) is saved, stamped for the scan, and restored afterwards.
 */
__private_extern__ void
mcache_audit_free_verify_set(mcache_audit_t *mca, void *base, size_t offset,
    size_t size)
{
	void *addr;
	u_int64_t *oaddr64;
	mcache_obj_t *next;

	addr = (void *)((uintptr_t)base + offset);
	/* Save the freelist linkage stored at the start of the object. */
	next = ((mcache_obj_t *)addr)->obj_next;

	/* For the "obj_next" pointer in the buffer */
	oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t));
	*oaddr64 = MCACHE_FREE_PATTERN;

	if ((oaddr64 = mcache_verify_set_pattern(MCACHE_FREE_PATTERN,
	    MCACHE_UNINITIALIZED_PATTERN, (caddr_t)base, size)) != NULL) {
		/* Use-after-free detected: report and panic. */
		mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
		    (int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
		/* NOTREACHED */
	}
	/* Restore the freelist linkage clobbered by the pattern above. */
	((mcache_obj_t *)addr)->obj_next = next;
}
1681*43a90889SApple OSS Distributions 
/*
 * Use the real panic() below -- presumably a panic macro was defined
 * earlier in this file; TODO confirm what it redirected to.
 */
#undef panic

/* Format string for one logged transaction: thread, depth, 16 PCs. */
#define DUMP_TRN_FMT() \
	    "%s transaction thread %p saved PC stack (%d deep):\n" \
	    "\t%p, %p, %p, %p, %p, %p, %p, %p\n" \
	    "\t%p, %p, %p, %p, %p, %p, %p, %p\n"

/* Argument list matching DUMP_TRN_FMT() for transaction slot x. */
#define DUMP_TRN_FIELDS(s, x) \
	    s, \
	    mca->mca_trns[x].mca_thread, mca->mca_trns[x].mca_depth, \
	    mca->mca_trns[x].mca_stack[0], mca->mca_trns[x].mca_stack[1], \
	    mca->mca_trns[x].mca_stack[2], mca->mca_trns[x].mca_stack[3], \
	    mca->mca_trns[x].mca_stack[4], mca->mca_trns[x].mca_stack[5], \
	    mca->mca_trns[x].mca_stack[6], mca->mca_trns[x].mca_stack[7], \
	    mca->mca_trns[x].mca_stack[8], mca->mca_trns[x].mca_stack[9], \
	    mca->mca_trns[x].mca_stack[10], mca->mca_trns[x].mca_stack[11], \
	    mca->mca_trns[x].mca_stack[12], mca->mca_trns[x].mca_stack[13], \
	    mca->mca_trns[x].mca_stack[14], mca->mca_trns[x].mca_stack[15]

/* Ring indices of the most recent and the previous transaction. */
#define MCA_TRN_LAST ((mca->mca_next_trn + mca_trn_max) % mca_trn_max)
#define MCA_TRN_PREV ((mca->mca_next_trn + mca_trn_max - 1) % mca_trn_max)
1703*43a90889SApple OSS Distributions 
/*
 * Format an audit record into the caller-supplied buffer of at least
 * DUMP_MCA_BUF_SIZE bytes: record/buffer/cache identity plus the last
 * two logged transactions (thread and saved PC stack).  Returns buf.
 */
__private_extern__ char *
mcache_dump_mca(char buf[static DUMP_MCA_BUF_SIZE], mcache_audit_t *mca)
{
	snprintf(buf, DUMP_MCA_BUF_SIZE,
	    "mca %p: addr %p, cache %p (%s) nxttrn %d\n"
	    DUMP_TRN_FMT()
	    DUMP_TRN_FMT(),

	    mca, mca->mca_addr, mca->mca_cache,
	    mca->mca_cache ? mca->mca_cache->mc_name : "?",
	    mca->mca_next_trn,

	    DUMP_TRN_FIELDS("last", MCA_TRN_LAST),
	    DUMP_TRN_FIELDS("previous", MCA_TRN_PREV));

	return buf;
}
1721*43a90889SApple OSS Distributions 
1722*43a90889SApple OSS Distributions __attribute__((noreturn))
1723*43a90889SApple OSS Distributions static void
mcache_audit_panic(mcache_audit_t * mca,void * addr,size_t offset,int64_t expected,int64_t got)1724*43a90889SApple OSS Distributions mcache_audit_panic(mcache_audit_t *mca, void *addr, size_t offset,
1725*43a90889SApple OSS Distributions     int64_t expected, int64_t got)
1726*43a90889SApple OSS Distributions {
1727*43a90889SApple OSS Distributions 	char buf[DUMP_MCA_BUF_SIZE];
1728*43a90889SApple OSS Distributions 
1729*43a90889SApple OSS Distributions 	if (mca == NULL) {
1730*43a90889SApple OSS Distributions 		panic("mcache_audit: buffer %p modified after free at "
1731*43a90889SApple OSS Distributions 		    "offset 0x%lx (0x%llx instead of 0x%llx)\n", addr,
1732*43a90889SApple OSS Distributions 		    offset, got, expected);
1733*43a90889SApple OSS Distributions 		/* NOTREACHED */
1734*43a90889SApple OSS Distributions 		__builtin_unreachable();
1735*43a90889SApple OSS Distributions 	}
1736*43a90889SApple OSS Distributions 
1737*43a90889SApple OSS Distributions 	panic("mcache_audit: buffer %p modified after free at offset 0x%lx "
1738*43a90889SApple OSS Distributions 	    "(0x%llx instead of 0x%llx)\n%s\n",
1739*43a90889SApple OSS Distributions 	    addr, offset, got, expected, mcache_dump_mca(buf, mca));
1740*43a90889SApple OSS Distributions 	/* NOTREACHED */
1741*43a90889SApple OSS Distributions 	__builtin_unreachable();
1742*43a90889SApple OSS Distributions }
1743