1*aca3beaaSApple OSS Distributions /*
2*aca3beaaSApple OSS Distributions * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3*aca3beaaSApple OSS Distributions *
4*aca3beaaSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*aca3beaaSApple OSS Distributions *
6*aca3beaaSApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*aca3beaaSApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*aca3beaaSApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*aca3beaaSApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*aca3beaaSApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*aca3beaaSApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*aca3beaaSApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*aca3beaaSApple OSS Distributions * terms of an Apple operating system software license agreement.
14*aca3beaaSApple OSS Distributions *
15*aca3beaaSApple OSS Distributions * Please obtain a copy of the License at
16*aca3beaaSApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*aca3beaaSApple OSS Distributions *
18*aca3beaaSApple OSS Distributions * The Original Code and all software distributed under the License are
19*aca3beaaSApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*aca3beaaSApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*aca3beaaSApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*aca3beaaSApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*aca3beaaSApple OSS Distributions * Please see the License for the specific language governing rights and
24*aca3beaaSApple OSS Distributions * limitations under the License.
25*aca3beaaSApple OSS Distributions *
26*aca3beaaSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*aca3beaaSApple OSS Distributions */
28*aca3beaaSApple OSS Distributions
29*aca3beaaSApple OSS Distributions /*
30*aca3beaaSApple OSS Distributions * Memory allocator with per-CPU caching, derived from the kmem magazine
31*aca3beaaSApple OSS Distributions * concept and implementation as described in the following paper:
32*aca3beaaSApple OSS Distributions * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick.pdf
33*aca3beaaSApple OSS Distributions * That implementation is Copyright 2006 Sun Microsystems, Inc. All rights
34*aca3beaaSApple OSS Distributions * reserved. Use is subject to license terms.
35*aca3beaaSApple OSS Distributions *
36*aca3beaaSApple OSS Distributions * There are several major differences between this and the original kmem
37*aca3beaaSApple OSS Distributions * magazine: this derivative implementation allows for multiple objects to
38*aca3beaaSApple OSS Distributions * be allocated and freed from/to the object cache in one call; in addition,
39*aca3beaaSApple OSS Distributions * it provides for better flexibility where the user is allowed to define
40*aca3beaaSApple OSS Distributions * its own slab allocator (instead of the default zone allocator). Finally,
41*aca3beaaSApple OSS Distributions * no object construction/destruction takes place at the moment, although
42*aca3beaaSApple OSS Distributions * this could be added in future to improve efficiency.
43*aca3beaaSApple OSS Distributions */
44*aca3beaaSApple OSS Distributions
45*aca3beaaSApple OSS Distributions #include <sys/param.h>
46*aca3beaaSApple OSS Distributions #include <sys/types.h>
47*aca3beaaSApple OSS Distributions #include <sys/malloc.h>
48*aca3beaaSApple OSS Distributions #include <sys/mbuf.h>
49*aca3beaaSApple OSS Distributions #include <sys/queue.h>
50*aca3beaaSApple OSS Distributions #include <sys/kernel.h>
51*aca3beaaSApple OSS Distributions #include <sys/systm.h>
52*aca3beaaSApple OSS Distributions
53*aca3beaaSApple OSS Distributions #include <kern/debug.h>
54*aca3beaaSApple OSS Distributions #include <kern/zalloc.h>
55*aca3beaaSApple OSS Distributions #include <kern/cpu_number.h>
56*aca3beaaSApple OSS Distributions #include <kern/locks.h>
57*aca3beaaSApple OSS Distributions #include <kern/thread_call.h>
58*aca3beaaSApple OSS Distributions
59*aca3beaaSApple OSS Distributions #include <libkern/libkern.h>
60*aca3beaaSApple OSS Distributions #include <libkern/OSAtomic.h>
61*aca3beaaSApple OSS Distributions #include <libkern/OSDebug.h>
62*aca3beaaSApple OSS Distributions
63*aca3beaaSApple OSS Distributions #include <mach/vm_param.h>
64*aca3beaaSApple OSS Distributions #include <machine/limits.h>
65*aca3beaaSApple OSS Distributions #include <machine/machine_routines.h>
66*aca3beaaSApple OSS Distributions
67*aca3beaaSApple OSS Distributions #include <string.h>
68*aca3beaaSApple OSS Distributions
69*aca3beaaSApple OSS Distributions #include <sys/mcache.h>
70*aca3beaaSApple OSS Distributions
71*aca3beaaSApple OSS Distributions #define MCACHE_SIZE(n) \
72*aca3beaaSApple OSS Distributions __builtin_offsetof(mcache_t, mc_cpu[n])
73*aca3beaaSApple OSS Distributions
74*aca3beaaSApple OSS Distributions /* Allocate extra in case we need to manually align the pointer */
75*aca3beaaSApple OSS Distributions #define MCACHE_ALLOC_SIZE \
76*aca3beaaSApple OSS Distributions (sizeof (void *) + MCACHE_SIZE(ncpu) + CPU_CACHE_LINE_SIZE)
77*aca3beaaSApple OSS Distributions
78*aca3beaaSApple OSS Distributions #define MCACHE_CPU(c) \
79*aca3beaaSApple OSS Distributions (mcache_cpu_t *)((void *)((char *)(c) + MCACHE_SIZE(cpu_number())))
80*aca3beaaSApple OSS Distributions
81*aca3beaaSApple OSS Distributions /*
82*aca3beaaSApple OSS Distributions * MCACHE_LIST_LOCK() and MCACHE_LIST_UNLOCK() are macros used
83*aca3beaaSApple OSS Distributions * to serialize accesses to the global list of caches in the system.
84*aca3beaaSApple OSS Distributions * They also record the thread currently running in the critical
85*aca3beaaSApple OSS Distributions * section, so that we can avoid recursive requests to reap the
86*aca3beaaSApple OSS Distributions * caches when memory runs low.
87*aca3beaaSApple OSS Distributions */
88*aca3beaaSApple OSS Distributions #define MCACHE_LIST_LOCK() { \
89*aca3beaaSApple OSS Distributions lck_mtx_lock(&mcache_llock); \
90*aca3beaaSApple OSS Distributions mcache_llock_owner = current_thread(); \
91*aca3beaaSApple OSS Distributions }
92*aca3beaaSApple OSS Distributions
93*aca3beaaSApple OSS Distributions #define MCACHE_LIST_UNLOCK() { \
94*aca3beaaSApple OSS Distributions mcache_llock_owner = NULL; \
95*aca3beaaSApple OSS Distributions lck_mtx_unlock(&mcache_llock); \
96*aca3beaaSApple OSS Distributions }
97*aca3beaaSApple OSS Distributions
98*aca3beaaSApple OSS Distributions #define MCACHE_LOCK(l) lck_mtx_lock(l)
99*aca3beaaSApple OSS Distributions #define MCACHE_UNLOCK(l) lck_mtx_unlock(l)
100*aca3beaaSApple OSS Distributions #define MCACHE_LOCK_TRY(l) lck_mtx_try_lock(l)
101*aca3beaaSApple OSS Distributions
102*aca3beaaSApple OSS Distributions static unsigned int ncpu;
103*aca3beaaSApple OSS Distributions static unsigned int cache_line_size;
104*aca3beaaSApple OSS Distributions static struct thread *mcache_llock_owner;
105*aca3beaaSApple OSS Distributions static LCK_GRP_DECLARE(mcache_llock_grp, "mcache.list");
106*aca3beaaSApple OSS Distributions static LCK_MTX_DECLARE(mcache_llock, &mcache_llock_grp);
107*aca3beaaSApple OSS Distributions static struct zone *mcache_zone;
108*aca3beaaSApple OSS Distributions static const uint32_t mcache_reap_interval = 15;
109*aca3beaaSApple OSS Distributions static const uint32_t mcache_reap_interval_leeway = 2;
110*aca3beaaSApple OSS Distributions static UInt32 mcache_reaping;
111*aca3beaaSApple OSS Distributions static int mcache_ready;
112*aca3beaaSApple OSS Distributions static int mcache_updating;
113*aca3beaaSApple OSS Distributions
114*aca3beaaSApple OSS Distributions static int mcache_bkt_contention = 3;
115*aca3beaaSApple OSS Distributions #if DEBUG
116*aca3beaaSApple OSS Distributions static unsigned int mcache_flags = MCF_DEBUG;
117*aca3beaaSApple OSS Distributions #else
118*aca3beaaSApple OSS Distributions static unsigned int mcache_flags = 0;
119*aca3beaaSApple OSS Distributions #endif
120*aca3beaaSApple OSS Distributions
121*aca3beaaSApple OSS Distributions int mca_trn_max = MCA_TRN_MAX;
122*aca3beaaSApple OSS Distributions
123*aca3beaaSApple OSS Distributions static mcache_bkttype_t mcache_bkttype[] = {
124*aca3beaaSApple OSS Distributions { 1, 4096, 32768, NULL },
125*aca3beaaSApple OSS Distributions { 3, 2048, 16384, NULL },
126*aca3beaaSApple OSS Distributions { 7, 1024, 12288, NULL },
127*aca3beaaSApple OSS Distributions { 15, 256, 8192, NULL },
128*aca3beaaSApple OSS Distributions { 31, 64, 4096, NULL },
129*aca3beaaSApple OSS Distributions { 47, 0, 2048, NULL },
130*aca3beaaSApple OSS Distributions { 63, 0, 1024, NULL },
131*aca3beaaSApple OSS Distributions { 95, 0, 512, NULL },
132*aca3beaaSApple OSS Distributions { 143, 0, 256, NULL },
133*aca3beaaSApple OSS Distributions { 165, 0, 0, NULL },
134*aca3beaaSApple OSS Distributions };
135*aca3beaaSApple OSS Distributions
136*aca3beaaSApple OSS Distributions static mcache_t *mcache_create_common(const char *, size_t, size_t,
137*aca3beaaSApple OSS Distributions mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
138*aca3beaaSApple OSS Distributions mcache_notifyfn_t, void *, u_int32_t, int);
139*aca3beaaSApple OSS Distributions static unsigned int mcache_slab_alloc(void *, mcache_obj_t ***,
140*aca3beaaSApple OSS Distributions unsigned int, int);
141*aca3beaaSApple OSS Distributions static void mcache_slab_free(void *, mcache_obj_t *, boolean_t);
142*aca3beaaSApple OSS Distributions static void mcache_slab_audit(void *, mcache_obj_t *, boolean_t);
143*aca3beaaSApple OSS Distributions static void mcache_cpu_refill(mcache_cpu_t *, mcache_bkt_t *, int);
144*aca3beaaSApple OSS Distributions static void mcache_cpu_batch_refill(mcache_cpu_t *, mcache_bkt_t *, int);
145*aca3beaaSApple OSS Distributions static uint32_t mcache_bkt_batch_alloc(mcache_t *, mcache_bktlist_t *,
146*aca3beaaSApple OSS Distributions mcache_bkt_t **, uint32_t);
147*aca3beaaSApple OSS Distributions static void mcache_bkt_batch_free(mcache_t *, mcache_bktlist_t *, mcache_bkt_t *);
148*aca3beaaSApple OSS Distributions static void mcache_cache_bkt_enable(mcache_t *);
149*aca3beaaSApple OSS Distributions static void mcache_bkt_purge(mcache_t *);
150*aca3beaaSApple OSS Distributions static void mcache_bkt_destroy(mcache_t *, mcache_bkt_t *, int);
151*aca3beaaSApple OSS Distributions static void mcache_bkt_ws_update(mcache_t *);
152*aca3beaaSApple OSS Distributions static void mcache_bkt_ws_zero(mcache_t *);
153*aca3beaaSApple OSS Distributions static void mcache_bkt_ws_reap(mcache_t *);
154*aca3beaaSApple OSS Distributions static void mcache_dispatch(void (*)(void *), void *);
155*aca3beaaSApple OSS Distributions static void mcache_cache_reap(mcache_t *);
156*aca3beaaSApple OSS Distributions static void mcache_cache_update(mcache_t *);
157*aca3beaaSApple OSS Distributions static void mcache_cache_bkt_resize(void *);
158*aca3beaaSApple OSS Distributions static void mcache_cache_enable(void *);
159*aca3beaaSApple OSS Distributions static void mcache_update(thread_call_param_t __unused, thread_call_param_t __unused);
160*aca3beaaSApple OSS Distributions static void mcache_update_timeout(void *);
161*aca3beaaSApple OSS Distributions static void mcache_applyall(void (*)(mcache_t *));
162*aca3beaaSApple OSS Distributions static void mcache_reap_start(void *);
163*aca3beaaSApple OSS Distributions static void mcache_reap_done(void *);
164*aca3beaaSApple OSS Distributions static void mcache_reap_timeout(thread_call_param_t __unused, thread_call_param_t);
165*aca3beaaSApple OSS Distributions static void mcache_notify(mcache_t *, u_int32_t);
166*aca3beaaSApple OSS Distributions static void mcache_purge(void *);
167*aca3beaaSApple OSS Distributions
168*aca3beaaSApple OSS Distributions static LIST_HEAD(, mcache) mcache_head;
169*aca3beaaSApple OSS Distributions mcache_t *mcache_audit_cache;
170*aca3beaaSApple OSS Distributions
171*aca3beaaSApple OSS Distributions static thread_call_t mcache_reap_tcall;
172*aca3beaaSApple OSS Distributions static thread_call_t mcache_update_tcall;
173*aca3beaaSApple OSS Distributions
/*
 * Initialize the framework; this is currently called as part of BSD init.
 *
 * Sets up everything the mcache layer needs before the first cache is
 * created: the CPU count, the backing zone for mcache_t structures, the
 * global cache list, one bucket cache per bucket size class, and the
 * audit cache.  Must run before any mcache_create*() caller.
 */
__private_extern__ void
mcache_init(void)
{
	mcache_bkttype_t *btp;
	unsigned int i;
	char name[32];

	/* Transaction history depth must hold at least alloc + free records */
	VERIFY(mca_trn_max >= 2);

	/*
	 * Size the per-CPU array for the maximum number of CPUs that may
	 * ever come up; MCACHE_ALLOC_SIZE depends on ncpu being set first.
	 */
	ncpu = ml_wait_max_cpus();
	(void) mcache_cache_line_size(); /* prime it */

	mcache_reap_tcall = thread_call_allocate(mcache_reap_timeout, NULL);
	mcache_update_tcall = thread_call_allocate(mcache_update, NULL);
	if (mcache_reap_tcall == NULL || mcache_update_tcall == NULL) {
		panic("mcache_init: thread_call_allocate failed");
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/* Zone backing the mcache_t structures themselves (with alignment slop) */
	mcache_zone = zone_create("mcache", MCACHE_ALLOC_SIZE,
	    ZC_PGZ_USE_GUARDS | ZC_DESTRUCTIBLE);

	LIST_INIT(&mcache_head);

	/*
	 * Create one cache per bucket size class; these supply the bucket
	 * (magazine) structures used by all other caches.  sizeof(*btp) is
	 * the element size, so the quotient is the table's element count.
	 */
	for (i = 0; i < sizeof(mcache_bkttype) / sizeof(*btp); i++) {
		btp = &mcache_bkttype[i];
		(void) snprintf(name, sizeof(name), "bkt_%d",
		    btp->bt_bktsize);
		btp->bt_cache = mcache_create(name,
		    (btp->bt_bktsize + 1) * sizeof(void *), 0, 0, MCR_SLEEP);
	}

	/* Allow boot-args to override the debug flags; mask to valid bits */
	PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof(mcache_flags));
	mcache_flags &= MCF_FLAGS_MASK;

	mcache_audit_cache = mcache_create("audit", sizeof(mcache_audit_t),
	    0, 0, MCR_SLEEP);

	/*
	 * Caches created above (before mcache_ready) deferred enabling
	 * their CPU/bucket layers; turn them on now.
	 */
	mcache_applyall(mcache_cache_bkt_enable);
	mcache_ready = 1;

	printf("mcache: %d CPU(s), %d bytes CPU cache line size\n",
	    ncpu, CPU_CACHE_LINE_SIZE);
}
222*aca3beaaSApple OSS Distributions
223*aca3beaaSApple OSS Distributions /*
224*aca3beaaSApple OSS Distributions * Return the global mcache flags.
225*aca3beaaSApple OSS Distributions */
226*aca3beaaSApple OSS Distributions __private_extern__ unsigned int
mcache_getflags(void)227*aca3beaaSApple OSS Distributions mcache_getflags(void)
228*aca3beaaSApple OSS Distributions {
229*aca3beaaSApple OSS Distributions return mcache_flags;
230*aca3beaaSApple OSS Distributions }
231*aca3beaaSApple OSS Distributions
232*aca3beaaSApple OSS Distributions /*
233*aca3beaaSApple OSS Distributions * Return the CPU cache line size.
234*aca3beaaSApple OSS Distributions */
235*aca3beaaSApple OSS Distributions __private_extern__ unsigned int
mcache_cache_line_size(void)236*aca3beaaSApple OSS Distributions mcache_cache_line_size(void)
237*aca3beaaSApple OSS Distributions {
238*aca3beaaSApple OSS Distributions if (cache_line_size == 0) {
239*aca3beaaSApple OSS Distributions ml_cpu_info_t cpu_info;
240*aca3beaaSApple OSS Distributions ml_cpu_get_info(&cpu_info);
241*aca3beaaSApple OSS Distributions cache_line_size = (unsigned int)cpu_info.cache_line_size;
242*aca3beaaSApple OSS Distributions }
243*aca3beaaSApple OSS Distributions return cache_line_size;
244*aca3beaaSApple OSS Distributions }
245*aca3beaaSApple OSS Distributions
246*aca3beaaSApple OSS Distributions /*
247*aca3beaaSApple OSS Distributions * Create a cache using the zone allocator as the backend slab allocator.
248*aca3beaaSApple OSS Distributions * The caller may specify any alignment for the object; if it specifies 0
249*aca3beaaSApple OSS Distributions * the default alignment (MCACHE_ALIGN) will be used.
250*aca3beaaSApple OSS Distributions */
251*aca3beaaSApple OSS Distributions __private_extern__ mcache_t *
mcache_create(const char * name,size_t bufsize,size_t align,u_int32_t flags,int wait __unused)252*aca3beaaSApple OSS Distributions mcache_create(const char *name, size_t bufsize, size_t align,
253*aca3beaaSApple OSS Distributions u_int32_t flags, int wait __unused)
254*aca3beaaSApple OSS Distributions {
255*aca3beaaSApple OSS Distributions return mcache_create_common(name, bufsize, align, mcache_slab_alloc,
256*aca3beaaSApple OSS Distributions mcache_slab_free, mcache_slab_audit, NULL, NULL, NULL, flags, 1);
257*aca3beaaSApple OSS Distributions }
258*aca3beaaSApple OSS Distributions
259*aca3beaaSApple OSS Distributions /*
260*aca3beaaSApple OSS Distributions * Create a cache using a custom backend slab allocator. Since the caller
261*aca3beaaSApple OSS Distributions * is responsible for allocation, no alignment guarantee will be provided
262*aca3beaaSApple OSS Distributions * by this framework.
263*aca3beaaSApple OSS Distributions */
264*aca3beaaSApple OSS Distributions __private_extern__ mcache_t *
mcache_create_ext(const char * name,size_t bufsize,mcache_allocfn_t allocfn,mcache_freefn_t freefn,mcache_auditfn_t auditfn,mcache_logfn_t logfn,mcache_notifyfn_t notifyfn,void * arg,u_int32_t flags,int wait __unused)265*aca3beaaSApple OSS Distributions mcache_create_ext(const char *name, size_t bufsize,
266*aca3beaaSApple OSS Distributions mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
267*aca3beaaSApple OSS Distributions mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
268*aca3beaaSApple OSS Distributions u_int32_t flags, int wait __unused)
269*aca3beaaSApple OSS Distributions {
270*aca3beaaSApple OSS Distributions return mcache_create_common(name, bufsize, 0, allocfn,
271*aca3beaaSApple OSS Distributions freefn, auditfn, logfn, notifyfn, arg, flags, 0);
272*aca3beaaSApple OSS Distributions }
273*aca3beaaSApple OSS Distributions
/*
 * Common cache creation routine.
 *
 * Allocates and initializes an mcache_t: the backing slab callbacks, the
 * (optional) private zone for zone-backed caches, the bucket layer locks
 * and bucket size class, and one cache-line-aligned per-CPU structure per
 * CPU.  The new cache is then inserted into the global cache list.
 *
 * need_zone distinguishes the internal zone-backed variant (alignment is
 * honored, a private zone is created) from the external-allocator variant
 * (alignment forced to 1, arg passed through as mc_private).
 */
static mcache_t *
mcache_create_common(const char *name, size_t bufsize, size_t align,
    mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
    mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
    u_int32_t flags, int need_zone)
{
	mcache_bkttype_t *btp;
	mcache_t *cp = NULL;
	size_t chunksize;
	void *buf, **pbuf;
	unsigned int c;
	char lck_name[64];

	/* Z_NOFAIL: allocation cannot return NULL, no check needed */
	buf = zalloc_flags(mcache_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/*
	 * In case we didn't get a cache-aligned memory, round it up
	 * accordingly.  This is needed in order to get the rest of
	 * structure members aligned properly.  It also means that
	 * the memory span gets shifted due to the round up, but it
	 * is okay since we've allocated extra space for this.
	 */
	cp = (mcache_t *)
	    P2ROUNDUP((intptr_t)buf + sizeof(void *), CPU_CACHE_LINE_SIZE);
	/* Stash the original zalloc pointer just below cp for later zfree */
	pbuf = (void **)((intptr_t)cp - sizeof(void *));
	*pbuf = buf;

	/*
	 * Guaranteed alignment is valid only when we use the internal
	 * slab allocator (currently set to use the zone allocator).
	 */
	if (!need_zone) {
		align = 1;
	} else {
		/* Enforce 64-bit minimum alignment for zone-based buffers */
		if (align == 0) {
			align = MCACHE_ALIGN;
		}
		align = P2ROUNDUP(align, MCACHE_ALIGN);
	}

	/* Alignment must be a power of two */
	if ((align & (align - 1)) != 0) {
		panic("mcache_create: bad alignment %lu", align);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	cp->mc_align = align;
	cp->mc_slab_alloc = allocfn;
	cp->mc_slab_free = freefn;
	cp->mc_slab_audit = auditfn;
	cp->mc_slab_log = logfn;
	cp->mc_slab_notify = notifyfn;
	/* Zone-backed caches pass the cache itself to the slab callbacks */
	cp->mc_private = need_zone ? cp : arg;
	cp->mc_bufsize = bufsize;
	cp->mc_flags = (flags & MCF_FLAGS_MASK) | mcache_flags;

	(void) snprintf(cp->mc_name, sizeof(cp->mc_name), "mcache.%s", name);

	(void) snprintf(lck_name, sizeof(lck_name), "%s.cpu", cp->mc_name);
	cp->mc_cpu_lock_grp = lck_grp_alloc_init(lck_name, LCK_GRP_ATTR_NULL);

	/*
	 * Allocation chunk size is the object's size plus any extra size
	 * needed to satisfy the object's alignment.  It is enforced to be
	 * at least the size of an LP64 pointer to simplify auditing and to
	 * handle multiple-element allocation requests, where the elements
	 * returned are linked together in a list.
	 */
	chunksize = MAX(bufsize, sizeof(u_int64_t));
	if (need_zone) {
		VERIFY(align != 0 && (align % MCACHE_ALIGN) == 0);
		/* Room for the audit back-pointer plus alignment slop */
		chunksize += sizeof(uint64_t) + align;
		chunksize = P2ROUNDUP(chunksize, align);
		cp->mc_slab_zone = zone_create(cp->mc_name, chunksize,
		    ZC_PGZ_USE_GUARDS | ZC_DESTRUCTIBLE);
	}
	cp->mc_chunksize = chunksize;

	/*
	 * Initialize the bucket layer.
	 */
	(void) snprintf(lck_name, sizeof(lck_name), "%s.bkt", cp->mc_name);
	cp->mc_bkt_lock_grp = lck_grp_alloc_init(lck_name,
	    LCK_GRP_ATTR_NULL);
	lck_mtx_init(&cp->mc_bkt_lock, cp->mc_bkt_lock_grp, LCK_ATTR_NULL);

	(void) snprintf(lck_name, sizeof(lck_name), "%s.sync", cp->mc_name);
	cp->mc_sync_lock_grp = lck_grp_alloc_init(lck_name,
	    LCK_GRP_ATTR_NULL);
	lck_mtx_init(&cp->mc_sync_lock, cp->mc_sync_lock_grp, LCK_ATTR_NULL);

	/*
	 * Pick the first bucket size class whose bt_minbuf is below the
	 * chunk size; larger objects get smaller buckets (the table is
	 * ordered by descending bt_minbuf, ending in a 0 sentinel).
	 */
	for (btp = mcache_bkttype; chunksize <= btp->bt_minbuf; btp++) {
		continue;
	}

	cp->cache_bkttype = btp;

	/*
	 * Initialize the CPU layer.  Each per-CPU structure is aligned
	 * on the CPU cache line boundary to prevent false sharing.
	 */
	for (c = 0; c < ncpu; c++) {
		mcache_cpu_t *ccp = &cp->mc_cpu[c];

		VERIFY(IS_P2ALIGNED(ccp, CPU_CACHE_LINE_SIZE));
		lck_mtx_init(&ccp->cc_lock, cp->mc_cpu_lock_grp, LCK_ATTR_NULL);
		/* -1 marks the filled/previous buckets as absent */
		ccp->cc_objs = -1;
		ccp->cc_pobjs = -1;
	}

	if (mcache_ready) {
		mcache_cache_bkt_enable(cp);
	}

	/* TODO: dynamically create sysctl for stats */

	MCACHE_LIST_LOCK();
	LIST_INSERT_HEAD(&mcache_head, cp, mc_list);
	MCACHE_LIST_UNLOCK();

	/*
	 * If cache buckets are enabled and this is the first cache
	 * created, start the periodic cache update.
	 */
	if (!(mcache_flags & MCF_NOCPUCACHE) && !mcache_updating) {
		mcache_updating = 1;
		mcache_update_timeout(NULL);
	}
	if (cp->mc_flags & MCF_DEBUG) {
		printf("mcache_create: %s (%s) arg %p bufsize %lu align %lu "
		    "chunksize %lu bktsize %d\n", name, need_zone ? "i" : "e",
		    arg, bufsize, cp->mc_align, chunksize, btp->bt_bktsize);
	}
	return cp;
}
413*aca3beaaSApple OSS Distributions
414*aca3beaaSApple OSS Distributions /*
415*aca3beaaSApple OSS Distributions * Allocate one or more objects from a cache.
416*aca3beaaSApple OSS Distributions */
417*aca3beaaSApple OSS Distributions __private_extern__ unsigned int
mcache_alloc_ext(mcache_t * cp,mcache_obj_t ** list,unsigned int num,int wait)418*aca3beaaSApple OSS Distributions mcache_alloc_ext(mcache_t *cp, mcache_obj_t **list, unsigned int num, int wait)
419*aca3beaaSApple OSS Distributions {
420*aca3beaaSApple OSS Distributions mcache_cpu_t *ccp;
421*aca3beaaSApple OSS Distributions mcache_obj_t **top = &(*list);
422*aca3beaaSApple OSS Distributions mcache_bkt_t *bkt;
423*aca3beaaSApple OSS Distributions unsigned int need = num;
424*aca3beaaSApple OSS Distributions boolean_t nwretry = FALSE;
425*aca3beaaSApple OSS Distributions
426*aca3beaaSApple OSS Distributions /* MCR_NOSLEEP and MCR_FAILOK are mutually exclusive */
427*aca3beaaSApple OSS Distributions VERIFY((wait & (MCR_NOSLEEP | MCR_FAILOK)) != (MCR_NOSLEEP | MCR_FAILOK));
428*aca3beaaSApple OSS Distributions
429*aca3beaaSApple OSS Distributions ASSERT(list != NULL);
430*aca3beaaSApple OSS Distributions *list = NULL;
431*aca3beaaSApple OSS Distributions
432*aca3beaaSApple OSS Distributions if (num == 0) {
433*aca3beaaSApple OSS Distributions return 0;
434*aca3beaaSApple OSS Distributions }
435*aca3beaaSApple OSS Distributions
436*aca3beaaSApple OSS Distributions retry_alloc:
437*aca3beaaSApple OSS Distributions /* We may not always be running in the same CPU in case of retries */
438*aca3beaaSApple OSS Distributions ccp = MCACHE_CPU(cp);
439*aca3beaaSApple OSS Distributions
440*aca3beaaSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
441*aca3beaaSApple OSS Distributions for (;;) {
442*aca3beaaSApple OSS Distributions /*
443*aca3beaaSApple OSS Distributions * If we have an object in the current CPU's filled bucket,
444*aca3beaaSApple OSS Distributions * chain the object to any previous objects and return if
445*aca3beaaSApple OSS Distributions * we've satisfied the number of requested objects.
446*aca3beaaSApple OSS Distributions */
447*aca3beaaSApple OSS Distributions if (ccp->cc_objs > 0) {
448*aca3beaaSApple OSS Distributions mcache_obj_t *tail;
449*aca3beaaSApple OSS Distributions int objs;
450*aca3beaaSApple OSS Distributions
451*aca3beaaSApple OSS Distributions /*
452*aca3beaaSApple OSS Distributions * Objects in the bucket are already linked together
453*aca3beaaSApple OSS Distributions * with the most recently freed object at the head of
454*aca3beaaSApple OSS Distributions * the list; grab as many objects as we can.
455*aca3beaaSApple OSS Distributions */
456*aca3beaaSApple OSS Distributions objs = MIN((unsigned int)ccp->cc_objs, need);
457*aca3beaaSApple OSS Distributions *list = ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
458*aca3beaaSApple OSS Distributions ccp->cc_objs -= objs;
459*aca3beaaSApple OSS Distributions ccp->cc_alloc += objs;
460*aca3beaaSApple OSS Distributions
461*aca3beaaSApple OSS Distributions tail = ccp->cc_filled->bkt_obj[ccp->cc_objs];
462*aca3beaaSApple OSS Distributions list = &tail->obj_next;
463*aca3beaaSApple OSS Distributions *list = NULL;
464*aca3beaaSApple OSS Distributions
465*aca3beaaSApple OSS Distributions /* If we got them all, return to caller */
466*aca3beaaSApple OSS Distributions if ((need -= objs) == 0) {
467*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
468*aca3beaaSApple OSS Distributions
469*aca3beaaSApple OSS Distributions if (!(cp->mc_flags & MCF_NOLEAKLOG) &&
470*aca3beaaSApple OSS Distributions cp->mc_slab_log != NULL) {
471*aca3beaaSApple OSS Distributions (*cp->mc_slab_log)(num, *top, TRUE);
472*aca3beaaSApple OSS Distributions }
473*aca3beaaSApple OSS Distributions
474*aca3beaaSApple OSS Distributions if (cp->mc_flags & MCF_DEBUG) {
475*aca3beaaSApple OSS Distributions goto debug_alloc;
476*aca3beaaSApple OSS Distributions }
477*aca3beaaSApple OSS Distributions
478*aca3beaaSApple OSS Distributions return num;
479*aca3beaaSApple OSS Distributions }
480*aca3beaaSApple OSS Distributions }
481*aca3beaaSApple OSS Distributions
482*aca3beaaSApple OSS Distributions /*
483*aca3beaaSApple OSS Distributions * The CPU's filled bucket is empty. If the previous filled
484*aca3beaaSApple OSS Distributions * bucket was full, exchange and try again.
485*aca3beaaSApple OSS Distributions */
486*aca3beaaSApple OSS Distributions if (ccp->cc_pobjs > 0) {
487*aca3beaaSApple OSS Distributions mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
488*aca3beaaSApple OSS Distributions continue;
489*aca3beaaSApple OSS Distributions }
490*aca3beaaSApple OSS Distributions
491*aca3beaaSApple OSS Distributions /*
492*aca3beaaSApple OSS Distributions * If the bucket layer is disabled, allocate from slab. This
493*aca3beaaSApple OSS Distributions * can happen either because MCF_NOCPUCACHE is set, or because
494*aca3beaaSApple OSS Distributions * the bucket layer is currently being resized.
495*aca3beaaSApple OSS Distributions */
496*aca3beaaSApple OSS Distributions if (ccp->cc_bktsize == 0) {
497*aca3beaaSApple OSS Distributions break;
498*aca3beaaSApple OSS Distributions }
499*aca3beaaSApple OSS Distributions
500*aca3beaaSApple OSS Distributions /*
501*aca3beaaSApple OSS Distributions * Both of the CPU's buckets are empty; try to get full
502*aca3beaaSApple OSS Distributions * bucket(s) from the bucket layer. Upon success, refill
503*aca3beaaSApple OSS Distributions * this CPU and place any empty bucket into the empty list.
504*aca3beaaSApple OSS Distributions * To prevent potential thrashing, replace both empty buckets
505*aca3beaaSApple OSS Distributions * only if the requested count exceeds a bucket's worth of
506*aca3beaaSApple OSS Distributions * objects.
507*aca3beaaSApple OSS Distributions */
508*aca3beaaSApple OSS Distributions (void) mcache_bkt_batch_alloc(cp, &cp->mc_full,
509*aca3beaaSApple OSS Distributions &bkt, (need <= ccp->cc_bktsize) ? 1 : 2);
510*aca3beaaSApple OSS Distributions if (bkt != NULL) {
511*aca3beaaSApple OSS Distributions mcache_bkt_t *bkt_list = NULL;
512*aca3beaaSApple OSS Distributions
513*aca3beaaSApple OSS Distributions if (ccp->cc_pfilled != NULL) {
514*aca3beaaSApple OSS Distributions ccp->cc_pfilled->bkt_next = bkt_list;
515*aca3beaaSApple OSS Distributions bkt_list = ccp->cc_pfilled;
516*aca3beaaSApple OSS Distributions }
517*aca3beaaSApple OSS Distributions if (bkt->bkt_next == NULL) {
518*aca3beaaSApple OSS Distributions /*
519*aca3beaaSApple OSS Distributions * Bucket layer allocation returns only 1
520*aca3beaaSApple OSS Distributions * magazine; retain current empty magazine.
521*aca3beaaSApple OSS Distributions */
522*aca3beaaSApple OSS Distributions mcache_cpu_refill(ccp, bkt, ccp->cc_bktsize);
523*aca3beaaSApple OSS Distributions } else {
524*aca3beaaSApple OSS Distributions /*
525*aca3beaaSApple OSS Distributions * We got 2 full buckets from the bucket
526*aca3beaaSApple OSS Distributions * layer; release the current empty bucket
527*aca3beaaSApple OSS Distributions * back to the bucket layer.
528*aca3beaaSApple OSS Distributions */
529*aca3beaaSApple OSS Distributions if (ccp->cc_filled != NULL) {
530*aca3beaaSApple OSS Distributions ccp->cc_filled->bkt_next = bkt_list;
531*aca3beaaSApple OSS Distributions bkt_list = ccp->cc_filled;
532*aca3beaaSApple OSS Distributions }
533*aca3beaaSApple OSS Distributions mcache_cpu_batch_refill(ccp, bkt,
534*aca3beaaSApple OSS Distributions ccp->cc_bktsize);
535*aca3beaaSApple OSS Distributions }
536*aca3beaaSApple OSS Distributions mcache_bkt_batch_free(cp, &cp->mc_empty, bkt_list);
537*aca3beaaSApple OSS Distributions continue;
538*aca3beaaSApple OSS Distributions }
539*aca3beaaSApple OSS Distributions
540*aca3beaaSApple OSS Distributions /*
541*aca3beaaSApple OSS Distributions * The bucket layer has no full buckets; allocate the
542*aca3beaaSApple OSS Distributions * object(s) directly from the slab layer.
543*aca3beaaSApple OSS Distributions */
544*aca3beaaSApple OSS Distributions break;
545*aca3beaaSApple OSS Distributions }
546*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
547*aca3beaaSApple OSS Distributions
548*aca3beaaSApple OSS Distributions need -= (*cp->mc_slab_alloc)(cp->mc_private, &list, need, wait);
549*aca3beaaSApple OSS Distributions
550*aca3beaaSApple OSS Distributions /*
551*aca3beaaSApple OSS Distributions * If this is a blocking allocation, or if it is non-blocking and
552*aca3beaaSApple OSS Distributions * the cache's full bucket is non-empty, then retry the allocation.
553*aca3beaaSApple OSS Distributions */
554*aca3beaaSApple OSS Distributions if (need > 0) {
555*aca3beaaSApple OSS Distributions if (!(wait & MCR_NONBLOCKING)) {
556*aca3beaaSApple OSS Distributions atomic_add_32(&cp->mc_wretry_cnt, 1);
557*aca3beaaSApple OSS Distributions goto retry_alloc;
558*aca3beaaSApple OSS Distributions } else if ((wait & (MCR_NOSLEEP | MCR_TRYHARD)) &&
559*aca3beaaSApple OSS Distributions !mcache_bkt_isempty(cp)) {
560*aca3beaaSApple OSS Distributions if (!nwretry) {
561*aca3beaaSApple OSS Distributions nwretry = TRUE;
562*aca3beaaSApple OSS Distributions }
563*aca3beaaSApple OSS Distributions atomic_add_32(&cp->mc_nwretry_cnt, 1);
564*aca3beaaSApple OSS Distributions goto retry_alloc;
565*aca3beaaSApple OSS Distributions } else if (nwretry) {
566*aca3beaaSApple OSS Distributions atomic_add_32(&cp->mc_nwfail_cnt, 1);
567*aca3beaaSApple OSS Distributions }
568*aca3beaaSApple OSS Distributions }
569*aca3beaaSApple OSS Distributions
570*aca3beaaSApple OSS Distributions if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) {
571*aca3beaaSApple OSS Distributions (*cp->mc_slab_log)((num - need), *top, TRUE);
572*aca3beaaSApple OSS Distributions }
573*aca3beaaSApple OSS Distributions
574*aca3beaaSApple OSS Distributions if (!(cp->mc_flags & MCF_DEBUG)) {
575*aca3beaaSApple OSS Distributions return num - need;
576*aca3beaaSApple OSS Distributions }
577*aca3beaaSApple OSS Distributions
578*aca3beaaSApple OSS Distributions debug_alloc:
579*aca3beaaSApple OSS Distributions if (cp->mc_flags & MCF_DEBUG) {
580*aca3beaaSApple OSS Distributions mcache_obj_t **o = top;
581*aca3beaaSApple OSS Distributions unsigned int n;
582*aca3beaaSApple OSS Distributions
583*aca3beaaSApple OSS Distributions n = 0;
584*aca3beaaSApple OSS Distributions /*
585*aca3beaaSApple OSS Distributions * Verify that the chain of objects have the same count as
586*aca3beaaSApple OSS Distributions * what we are about to report to the caller. Any mismatch
587*aca3beaaSApple OSS Distributions * here means that the object list is insanely broken and
588*aca3beaaSApple OSS Distributions * therefore we must panic.
589*aca3beaaSApple OSS Distributions */
590*aca3beaaSApple OSS Distributions while (*o != NULL) {
591*aca3beaaSApple OSS Distributions o = &(*o)->obj_next;
592*aca3beaaSApple OSS Distributions ++n;
593*aca3beaaSApple OSS Distributions }
594*aca3beaaSApple OSS Distributions if (n != (num - need)) {
595*aca3beaaSApple OSS Distributions panic("mcache_alloc_ext: %s cp %p corrupted list "
596*aca3beaaSApple OSS Distributions "(got %d actual %d)\n", cp->mc_name,
597*aca3beaaSApple OSS Distributions (void *)cp, num - need, n);
598*aca3beaaSApple OSS Distributions /* NOTREACHED */
599*aca3beaaSApple OSS Distributions __builtin_unreachable();
600*aca3beaaSApple OSS Distributions }
601*aca3beaaSApple OSS Distributions }
602*aca3beaaSApple OSS Distributions
603*aca3beaaSApple OSS Distributions /* Invoke the slab layer audit callback if auditing is enabled */
604*aca3beaaSApple OSS Distributions if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) {
605*aca3beaaSApple OSS Distributions (*cp->mc_slab_audit)(cp->mc_private, *top, TRUE);
606*aca3beaaSApple OSS Distributions }
607*aca3beaaSApple OSS Distributions
608*aca3beaaSApple OSS Distributions return num - need;
609*aca3beaaSApple OSS Distributions }
610*aca3beaaSApple OSS Distributions
611*aca3beaaSApple OSS Distributions /*
612*aca3beaaSApple OSS Distributions * Allocate a single object from a cache.
613*aca3beaaSApple OSS Distributions */
614*aca3beaaSApple OSS Distributions __private_extern__ void *
mcache_alloc(mcache_t * cp,int wait)615*aca3beaaSApple OSS Distributions mcache_alloc(mcache_t *cp, int wait)
616*aca3beaaSApple OSS Distributions {
617*aca3beaaSApple OSS Distributions mcache_obj_t *buf;
618*aca3beaaSApple OSS Distributions
619*aca3beaaSApple OSS Distributions (void) mcache_alloc_ext(cp, &buf, 1, wait);
620*aca3beaaSApple OSS Distributions return buf;
621*aca3beaaSApple OSS Distributions }
622*aca3beaaSApple OSS Distributions
623*aca3beaaSApple OSS Distributions __private_extern__ void
mcache_waiter_inc(mcache_t * cp)624*aca3beaaSApple OSS Distributions mcache_waiter_inc(mcache_t *cp)
625*aca3beaaSApple OSS Distributions {
626*aca3beaaSApple OSS Distributions atomic_add_32(&cp->mc_waiter_cnt, 1);
627*aca3beaaSApple OSS Distributions }
628*aca3beaaSApple OSS Distributions
629*aca3beaaSApple OSS Distributions __private_extern__ void
mcache_waiter_dec(mcache_t * cp)630*aca3beaaSApple OSS Distributions mcache_waiter_dec(mcache_t *cp)
631*aca3beaaSApple OSS Distributions {
632*aca3beaaSApple OSS Distributions atomic_add_32(&cp->mc_waiter_cnt, -1);
633*aca3beaaSApple OSS Distributions }
634*aca3beaaSApple OSS Distributions
635*aca3beaaSApple OSS Distributions __private_extern__ boolean_t
mcache_bkt_isempty(mcache_t * cp)636*aca3beaaSApple OSS Distributions mcache_bkt_isempty(mcache_t *cp)
637*aca3beaaSApple OSS Distributions {
638*aca3beaaSApple OSS Distributions /*
639*aca3beaaSApple OSS Distributions * This isn't meant to accurately tell whether there are
640*aca3beaaSApple OSS Distributions * any full buckets in the cache; it is simply a way to
641*aca3beaaSApple OSS Distributions * obtain "hints" about the state of the cache.
642*aca3beaaSApple OSS Distributions */
643*aca3beaaSApple OSS Distributions return cp->mc_full.bl_total == 0;
644*aca3beaaSApple OSS Distributions }
645*aca3beaaSApple OSS Distributions
646*aca3beaaSApple OSS Distributions /*
647*aca3beaaSApple OSS Distributions * Notify the slab layer about an event.
648*aca3beaaSApple OSS Distributions */
649*aca3beaaSApple OSS Distributions static void
mcache_notify(mcache_t * cp,u_int32_t event)650*aca3beaaSApple OSS Distributions mcache_notify(mcache_t *cp, u_int32_t event)
651*aca3beaaSApple OSS Distributions {
652*aca3beaaSApple OSS Distributions if (cp->mc_slab_notify != NULL) {
653*aca3beaaSApple OSS Distributions (*cp->mc_slab_notify)(cp->mc_private, event);
654*aca3beaaSApple OSS Distributions }
655*aca3beaaSApple OSS Distributions }
656*aca3beaaSApple OSS Distributions
657*aca3beaaSApple OSS Distributions /*
658*aca3beaaSApple OSS Distributions * Purge the cache and disable its buckets.
659*aca3beaaSApple OSS Distributions */
660*aca3beaaSApple OSS Distributions static void
mcache_purge(void * arg)661*aca3beaaSApple OSS Distributions mcache_purge(void *arg)
662*aca3beaaSApple OSS Distributions {
663*aca3beaaSApple OSS Distributions mcache_t *cp = arg;
664*aca3beaaSApple OSS Distributions
665*aca3beaaSApple OSS Distributions mcache_bkt_purge(cp);
666*aca3beaaSApple OSS Distributions /*
667*aca3beaaSApple OSS Distributions * We cannot simply call mcache_cache_bkt_enable() from here as
668*aca3beaaSApple OSS Distributions * a bucket resize may be in flight and we would cause the CPU
669*aca3beaaSApple OSS Distributions * layers of the cache to point to different sizes. Therefore,
670*aca3beaaSApple OSS Distributions * we simply increment the enable count so that during the next
671*aca3beaaSApple OSS Distributions * periodic cache update the buckets can be reenabled.
672*aca3beaaSApple OSS Distributions */
673*aca3beaaSApple OSS Distributions lck_mtx_lock_spin(&cp->mc_sync_lock);
674*aca3beaaSApple OSS Distributions cp->mc_enable_cnt++;
675*aca3beaaSApple OSS Distributions lck_mtx_unlock(&cp->mc_sync_lock);
676*aca3beaaSApple OSS Distributions }
677*aca3beaaSApple OSS Distributions
678*aca3beaaSApple OSS Distributions __private_extern__ boolean_t
mcache_purge_cache(mcache_t * cp,boolean_t async)679*aca3beaaSApple OSS Distributions mcache_purge_cache(mcache_t *cp, boolean_t async)
680*aca3beaaSApple OSS Distributions {
681*aca3beaaSApple OSS Distributions /*
682*aca3beaaSApple OSS Distributions * Purging a cache that has no per-CPU caches or is already
683*aca3beaaSApple OSS Distributions * in the process of being purged is rather pointless.
684*aca3beaaSApple OSS Distributions */
685*aca3beaaSApple OSS Distributions if (cp->mc_flags & MCF_NOCPUCACHE) {
686*aca3beaaSApple OSS Distributions return FALSE;
687*aca3beaaSApple OSS Distributions }
688*aca3beaaSApple OSS Distributions
689*aca3beaaSApple OSS Distributions lck_mtx_lock_spin(&cp->mc_sync_lock);
690*aca3beaaSApple OSS Distributions if (cp->mc_purge_cnt > 0) {
691*aca3beaaSApple OSS Distributions lck_mtx_unlock(&cp->mc_sync_lock);
692*aca3beaaSApple OSS Distributions return FALSE;
693*aca3beaaSApple OSS Distributions }
694*aca3beaaSApple OSS Distributions cp->mc_purge_cnt++;
695*aca3beaaSApple OSS Distributions lck_mtx_unlock(&cp->mc_sync_lock);
696*aca3beaaSApple OSS Distributions
697*aca3beaaSApple OSS Distributions if (async) {
698*aca3beaaSApple OSS Distributions mcache_dispatch(mcache_purge, cp);
699*aca3beaaSApple OSS Distributions } else {
700*aca3beaaSApple OSS Distributions mcache_purge(cp);
701*aca3beaaSApple OSS Distributions }
702*aca3beaaSApple OSS Distributions
703*aca3beaaSApple OSS Distributions return TRUE;
704*aca3beaaSApple OSS Distributions }
705*aca3beaaSApple OSS Distributions
706*aca3beaaSApple OSS Distributions /*
707*aca3beaaSApple OSS Distributions * Free a single object to a cache.
708*aca3beaaSApple OSS Distributions */
709*aca3beaaSApple OSS Distributions __private_extern__ void
mcache_free(mcache_t * cp,void * buf)710*aca3beaaSApple OSS Distributions mcache_free(mcache_t *cp, void *buf)
711*aca3beaaSApple OSS Distributions {
712*aca3beaaSApple OSS Distributions ((mcache_obj_t *)buf)->obj_next = NULL;
713*aca3beaaSApple OSS Distributions mcache_free_ext(cp, (mcache_obj_t *)buf);
714*aca3beaaSApple OSS Distributions }
715*aca3beaaSApple OSS Distributions
716*aca3beaaSApple OSS Distributions /*
717*aca3beaaSApple OSS Distributions * Free one or more objects to a cache.
718*aca3beaaSApple OSS Distributions */
719*aca3beaaSApple OSS Distributions __private_extern__ void
mcache_free_ext(mcache_t * cp,mcache_obj_t * list)720*aca3beaaSApple OSS Distributions mcache_free_ext(mcache_t *cp, mcache_obj_t *list)
721*aca3beaaSApple OSS Distributions {
722*aca3beaaSApple OSS Distributions mcache_cpu_t *ccp = MCACHE_CPU(cp);
723*aca3beaaSApple OSS Distributions mcache_bkttype_t *btp;
724*aca3beaaSApple OSS Distributions mcache_obj_t *nlist;
725*aca3beaaSApple OSS Distributions mcache_bkt_t *bkt;
726*aca3beaaSApple OSS Distributions
727*aca3beaaSApple OSS Distributions if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) {
728*aca3beaaSApple OSS Distributions (*cp->mc_slab_log)(0, list, FALSE);
729*aca3beaaSApple OSS Distributions }
730*aca3beaaSApple OSS Distributions
731*aca3beaaSApple OSS Distributions /* Invoke the slab layer audit callback if auditing is enabled */
732*aca3beaaSApple OSS Distributions if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) {
733*aca3beaaSApple OSS Distributions (*cp->mc_slab_audit)(cp->mc_private, list, FALSE);
734*aca3beaaSApple OSS Distributions }
735*aca3beaaSApple OSS Distributions
736*aca3beaaSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
737*aca3beaaSApple OSS Distributions for (;;) {
738*aca3beaaSApple OSS Distributions /*
739*aca3beaaSApple OSS Distributions * If there is space in the current CPU's filled bucket, put
740*aca3beaaSApple OSS Distributions * the object there and return once all objects are freed.
741*aca3beaaSApple OSS Distributions * Note the cast to unsigned integer takes care of the case
742*aca3beaaSApple OSS Distributions * where the bucket layer is disabled (when cc_objs is -1).
743*aca3beaaSApple OSS Distributions */
744*aca3beaaSApple OSS Distributions if ((unsigned int)ccp->cc_objs <
745*aca3beaaSApple OSS Distributions (unsigned int)ccp->cc_bktsize) {
746*aca3beaaSApple OSS Distributions /*
747*aca3beaaSApple OSS Distributions * Reverse the list while we place the object into the
748*aca3beaaSApple OSS Distributions * bucket; this effectively causes the most recently
749*aca3beaaSApple OSS Distributions * freed object(s) to be reused during allocation.
750*aca3beaaSApple OSS Distributions */
751*aca3beaaSApple OSS Distributions nlist = list->obj_next;
752*aca3beaaSApple OSS Distributions list->obj_next = (ccp->cc_objs == 0) ? NULL :
753*aca3beaaSApple OSS Distributions ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
754*aca3beaaSApple OSS Distributions ccp->cc_filled->bkt_obj[ccp->cc_objs++] = list;
755*aca3beaaSApple OSS Distributions ccp->cc_free++;
756*aca3beaaSApple OSS Distributions
757*aca3beaaSApple OSS Distributions if ((list = nlist) != NULL) {
758*aca3beaaSApple OSS Distributions continue;
759*aca3beaaSApple OSS Distributions }
760*aca3beaaSApple OSS Distributions
761*aca3beaaSApple OSS Distributions /* We are done; return to caller */
762*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
763*aca3beaaSApple OSS Distributions
764*aca3beaaSApple OSS Distributions /* If there is a waiter below, notify it */
765*aca3beaaSApple OSS Distributions if (cp->mc_waiter_cnt > 0) {
766*aca3beaaSApple OSS Distributions mcache_notify(cp, MCN_RETRYALLOC);
767*aca3beaaSApple OSS Distributions }
768*aca3beaaSApple OSS Distributions return;
769*aca3beaaSApple OSS Distributions }
770*aca3beaaSApple OSS Distributions
771*aca3beaaSApple OSS Distributions /*
772*aca3beaaSApple OSS Distributions * The CPU's filled bucket is full. If the previous filled
773*aca3beaaSApple OSS Distributions * bucket was empty, exchange and try again.
774*aca3beaaSApple OSS Distributions */
775*aca3beaaSApple OSS Distributions if (ccp->cc_pobjs == 0) {
776*aca3beaaSApple OSS Distributions mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
777*aca3beaaSApple OSS Distributions continue;
778*aca3beaaSApple OSS Distributions }
779*aca3beaaSApple OSS Distributions
780*aca3beaaSApple OSS Distributions /*
781*aca3beaaSApple OSS Distributions * If the bucket layer is disabled, free to slab. This can
782*aca3beaaSApple OSS Distributions * happen either because MCF_NOCPUCACHE is set, or because
783*aca3beaaSApple OSS Distributions * the bucket layer is currently being resized.
784*aca3beaaSApple OSS Distributions */
785*aca3beaaSApple OSS Distributions if (ccp->cc_bktsize == 0) {
786*aca3beaaSApple OSS Distributions break;
787*aca3beaaSApple OSS Distributions }
788*aca3beaaSApple OSS Distributions
789*aca3beaaSApple OSS Distributions /*
790*aca3beaaSApple OSS Distributions * Both of the CPU's buckets are full; try to get empty
791*aca3beaaSApple OSS Distributions * buckets from the bucket layer. Upon success, empty this
792*aca3beaaSApple OSS Distributions * CPU and place any full bucket into the full list.
793*aca3beaaSApple OSS Distributions *
794*aca3beaaSApple OSS Distributions * TODO: Because the caller currently doesn't indicate
795*aca3beaaSApple OSS Distributions * the number of objects in the list, we choose the more
796*aca3beaaSApple OSS Distributions * conservative approach of allocating only 1 empty
797*aca3beaaSApple OSS Distributions * bucket (to prevent potential thrashing). Once we
798*aca3beaaSApple OSS Distributions * have the object count, we can replace 1 with similar
799*aca3beaaSApple OSS Distributions * logic as used in mcache_alloc_ext().
800*aca3beaaSApple OSS Distributions */
801*aca3beaaSApple OSS Distributions (void) mcache_bkt_batch_alloc(cp, &cp->mc_empty, &bkt, 1);
802*aca3beaaSApple OSS Distributions if (bkt != NULL) {
803*aca3beaaSApple OSS Distributions mcache_bkt_t *bkt_list = NULL;
804*aca3beaaSApple OSS Distributions
805*aca3beaaSApple OSS Distributions if (ccp->cc_pfilled != NULL) {
806*aca3beaaSApple OSS Distributions ccp->cc_pfilled->bkt_next = bkt_list;
807*aca3beaaSApple OSS Distributions bkt_list = ccp->cc_pfilled;
808*aca3beaaSApple OSS Distributions }
809*aca3beaaSApple OSS Distributions if (bkt->bkt_next == NULL) {
810*aca3beaaSApple OSS Distributions /*
811*aca3beaaSApple OSS Distributions * Bucket layer allocation returns only 1
812*aca3beaaSApple OSS Distributions * bucket; retain current full bucket.
813*aca3beaaSApple OSS Distributions */
814*aca3beaaSApple OSS Distributions mcache_cpu_refill(ccp, bkt, 0);
815*aca3beaaSApple OSS Distributions } else {
816*aca3beaaSApple OSS Distributions /*
817*aca3beaaSApple OSS Distributions * We got 2 empty buckets from the bucket
818*aca3beaaSApple OSS Distributions * layer; release the current full bucket
819*aca3beaaSApple OSS Distributions * back to the bucket layer.
820*aca3beaaSApple OSS Distributions */
821*aca3beaaSApple OSS Distributions if (ccp->cc_filled != NULL) {
822*aca3beaaSApple OSS Distributions ccp->cc_filled->bkt_next = bkt_list;
823*aca3beaaSApple OSS Distributions bkt_list = ccp->cc_filled;
824*aca3beaaSApple OSS Distributions }
825*aca3beaaSApple OSS Distributions mcache_cpu_batch_refill(ccp, bkt, 0);
826*aca3beaaSApple OSS Distributions }
827*aca3beaaSApple OSS Distributions mcache_bkt_batch_free(cp, &cp->mc_full, bkt_list);
828*aca3beaaSApple OSS Distributions continue;
829*aca3beaaSApple OSS Distributions }
830*aca3beaaSApple OSS Distributions btp = cp->cache_bkttype;
831*aca3beaaSApple OSS Distributions
832*aca3beaaSApple OSS Distributions /*
833*aca3beaaSApple OSS Distributions * We need an empty bucket to put our freed objects into
834*aca3beaaSApple OSS Distributions * but couldn't get an empty bucket from the bucket layer;
835*aca3beaaSApple OSS Distributions * attempt to allocate one. We do not want to block for
836*aca3beaaSApple OSS Distributions * allocation here, and if the bucket allocation fails
837*aca3beaaSApple OSS Distributions * we will simply fall through to the slab layer.
838*aca3beaaSApple OSS Distributions */
839*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
840*aca3beaaSApple OSS Distributions bkt = mcache_alloc(btp->bt_cache, MCR_NOSLEEP);
841*aca3beaaSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
842*aca3beaaSApple OSS Distributions
843*aca3beaaSApple OSS Distributions if (bkt != NULL) {
844*aca3beaaSApple OSS Distributions /*
845*aca3beaaSApple OSS Distributions * We have an empty bucket, but since we drop the
846*aca3beaaSApple OSS Distributions * CPU lock above, the cache's bucket size may have
847*aca3beaaSApple OSS Distributions * changed. If so, free the bucket and try again.
848*aca3beaaSApple OSS Distributions */
849*aca3beaaSApple OSS Distributions if (ccp->cc_bktsize != btp->bt_bktsize) {
850*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
851*aca3beaaSApple OSS Distributions mcache_free(btp->bt_cache, bkt);
852*aca3beaaSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
853*aca3beaaSApple OSS Distributions continue;
854*aca3beaaSApple OSS Distributions }
855*aca3beaaSApple OSS Distributions
856*aca3beaaSApple OSS Distributions /*
857*aca3beaaSApple OSS Distributions * Store it in the bucket object since we'll
858*aca3beaaSApple OSS Distributions * need to refer to it during bucket destroy;
859*aca3beaaSApple OSS Distributions * we can't safely refer to cache_bkttype as
860*aca3beaaSApple OSS Distributions * the bucket lock may not be acquired then.
861*aca3beaaSApple OSS Distributions */
862*aca3beaaSApple OSS Distributions bkt->bkt_type = btp;
863*aca3beaaSApple OSS Distributions
864*aca3beaaSApple OSS Distributions /*
865*aca3beaaSApple OSS Distributions * We have an empty bucket of the right size;
866*aca3beaaSApple OSS Distributions * add it to the bucket layer and try again.
867*aca3beaaSApple OSS Distributions */
868*aca3beaaSApple OSS Distributions ASSERT(bkt->bkt_next == NULL);
869*aca3beaaSApple OSS Distributions mcache_bkt_batch_free(cp, &cp->mc_empty, bkt);
870*aca3beaaSApple OSS Distributions continue;
871*aca3beaaSApple OSS Distributions }
872*aca3beaaSApple OSS Distributions
873*aca3beaaSApple OSS Distributions /*
874*aca3beaaSApple OSS Distributions * The bucket layer has no empty buckets; free the
875*aca3beaaSApple OSS Distributions * object(s) directly to the slab layer.
876*aca3beaaSApple OSS Distributions */
877*aca3beaaSApple OSS Distributions break;
878*aca3beaaSApple OSS Distributions }
879*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
880*aca3beaaSApple OSS Distributions
881*aca3beaaSApple OSS Distributions /* If there is a waiter below, notify it */
882*aca3beaaSApple OSS Distributions if (cp->mc_waiter_cnt > 0) {
883*aca3beaaSApple OSS Distributions mcache_notify(cp, MCN_RETRYALLOC);
884*aca3beaaSApple OSS Distributions }
885*aca3beaaSApple OSS Distributions
886*aca3beaaSApple OSS Distributions /* Advise the slab layer to purge the object(s) */
887*aca3beaaSApple OSS Distributions (*cp->mc_slab_free)(cp->mc_private, list,
888*aca3beaaSApple OSS Distributions (cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
889*aca3beaaSApple OSS Distributions }
890*aca3beaaSApple OSS Distributions
891*aca3beaaSApple OSS Distributions /*
892*aca3beaaSApple OSS Distributions * Cache destruction routine.
893*aca3beaaSApple OSS Distributions */
894*aca3beaaSApple OSS Distributions __private_extern__ void
mcache_destroy(mcache_t * cp)895*aca3beaaSApple OSS Distributions mcache_destroy(mcache_t *cp)
896*aca3beaaSApple OSS Distributions {
897*aca3beaaSApple OSS Distributions void **pbuf;
898*aca3beaaSApple OSS Distributions
899*aca3beaaSApple OSS Distributions MCACHE_LIST_LOCK();
900*aca3beaaSApple OSS Distributions LIST_REMOVE(cp, mc_list);
901*aca3beaaSApple OSS Distributions MCACHE_LIST_UNLOCK();
902*aca3beaaSApple OSS Distributions
903*aca3beaaSApple OSS Distributions mcache_bkt_purge(cp);
904*aca3beaaSApple OSS Distributions
905*aca3beaaSApple OSS Distributions /*
906*aca3beaaSApple OSS Distributions * This cache is dead; there should be no further transaction.
907*aca3beaaSApple OSS Distributions * If it's still invoked, make sure that it induces a fault.
908*aca3beaaSApple OSS Distributions */
909*aca3beaaSApple OSS Distributions cp->mc_slab_alloc = NULL;
910*aca3beaaSApple OSS Distributions cp->mc_slab_free = NULL;
911*aca3beaaSApple OSS Distributions cp->mc_slab_audit = NULL;
912*aca3beaaSApple OSS Distributions
913*aca3beaaSApple OSS Distributions lck_grp_free(cp->mc_bkt_lock_grp);
914*aca3beaaSApple OSS Distributions lck_grp_free(cp->mc_cpu_lock_grp);
915*aca3beaaSApple OSS Distributions lck_grp_free(cp->mc_sync_lock_grp);
916*aca3beaaSApple OSS Distributions
917*aca3beaaSApple OSS Distributions /*
918*aca3beaaSApple OSS Distributions * TODO: We need to destroy the zone here, but cannot do it
919*aca3beaaSApple OSS Distributions * because there is no such way to achieve that. Until then
920*aca3beaaSApple OSS Distributions * the memory allocated for the zone structure is leaked.
921*aca3beaaSApple OSS Distributions * Once it is achievable, uncomment these lines:
922*aca3beaaSApple OSS Distributions *
923*aca3beaaSApple OSS Distributions * if (cp->mc_slab_zone != NULL) {
924*aca3beaaSApple OSS Distributions * zdestroy(cp->mc_slab_zone);
925*aca3beaaSApple OSS Distributions * cp->mc_slab_zone = NULL;
926*aca3beaaSApple OSS Distributions * }
927*aca3beaaSApple OSS Distributions */
928*aca3beaaSApple OSS Distributions
929*aca3beaaSApple OSS Distributions /* Get the original address since we're about to free it */
930*aca3beaaSApple OSS Distributions pbuf = (void **)((intptr_t)cp - sizeof(void *));
931*aca3beaaSApple OSS Distributions
932*aca3beaaSApple OSS Distributions zfree(mcache_zone, *pbuf);
933*aca3beaaSApple OSS Distributions }
934*aca3beaaSApple OSS Distributions
935*aca3beaaSApple OSS Distributions /*
936*aca3beaaSApple OSS Distributions * Internal slab allocator used as a backend for simple caches. The current
937*aca3beaaSApple OSS Distributions * implementation uses the zone allocator for simplicity reasons.
938*aca3beaaSApple OSS Distributions */
939*aca3beaaSApple OSS Distributions static unsigned int
mcache_slab_alloc(void * arg,mcache_obj_t *** plist,unsigned int num,int wait)940*aca3beaaSApple OSS Distributions mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num,
941*aca3beaaSApple OSS Distributions int wait)
942*aca3beaaSApple OSS Distributions {
943*aca3beaaSApple OSS Distributions #pragma unused(wait)
944*aca3beaaSApple OSS Distributions mcache_t *cp = arg;
945*aca3beaaSApple OSS Distributions unsigned int need = num;
946*aca3beaaSApple OSS Distributions size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
947*aca3beaaSApple OSS Distributions u_int32_t flags = cp->mc_flags;
948*aca3beaaSApple OSS Distributions void *buf, *base, **pbuf;
949*aca3beaaSApple OSS Distributions mcache_obj_t **list = *plist;
950*aca3beaaSApple OSS Distributions
951*aca3beaaSApple OSS Distributions *list = NULL;
952*aca3beaaSApple OSS Distributions
953*aca3beaaSApple OSS Distributions for (;;) {
954*aca3beaaSApple OSS Distributions buf = zalloc_flags(cp->mc_slab_zone, Z_WAITOK | Z_NOFAIL);
955*aca3beaaSApple OSS Distributions
956*aca3beaaSApple OSS Distributions /* Get the aligned base address for this object */
957*aca3beaaSApple OSS Distributions base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t),
958*aca3beaaSApple OSS Distributions cp->mc_align);
959*aca3beaaSApple OSS Distributions
960*aca3beaaSApple OSS Distributions /*
961*aca3beaaSApple OSS Distributions * Wind back a pointer size from the aligned base and
962*aca3beaaSApple OSS Distributions * save the original address so we can free it later.
963*aca3beaaSApple OSS Distributions */
964*aca3beaaSApple OSS Distributions pbuf = (void **)((intptr_t)base - sizeof(void *));
965*aca3beaaSApple OSS Distributions *pbuf = buf;
966*aca3beaaSApple OSS Distributions
967*aca3beaaSApple OSS Distributions VERIFY(((intptr_t)base + cp->mc_bufsize) <=
968*aca3beaaSApple OSS Distributions ((intptr_t)buf + cp->mc_chunksize));
969*aca3beaaSApple OSS Distributions
970*aca3beaaSApple OSS Distributions /*
971*aca3beaaSApple OSS Distributions * If auditing is enabled, patternize the contents of
972*aca3beaaSApple OSS Distributions * the buffer starting from the 64-bit aligned base to
973*aca3beaaSApple OSS Distributions * the end of the buffer; the length is rounded up to
974*aca3beaaSApple OSS Distributions * the nearest 64-bit multiply; this is because we use
975*aca3beaaSApple OSS Distributions * 64-bit memory access to set/check the pattern.
976*aca3beaaSApple OSS Distributions */
977*aca3beaaSApple OSS Distributions if (flags & MCF_DEBUG) {
978*aca3beaaSApple OSS Distributions VERIFY(((intptr_t)base + rsize) <=
979*aca3beaaSApple OSS Distributions ((intptr_t)buf + cp->mc_chunksize));
980*aca3beaaSApple OSS Distributions mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
981*aca3beaaSApple OSS Distributions }
982*aca3beaaSApple OSS Distributions
983*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(base, cp->mc_align));
984*aca3beaaSApple OSS Distributions *list = (mcache_obj_t *)base;
985*aca3beaaSApple OSS Distributions
986*aca3beaaSApple OSS Distributions (*list)->obj_next = NULL;
987*aca3beaaSApple OSS Distributions list = *plist = &(*list)->obj_next;
988*aca3beaaSApple OSS Distributions
989*aca3beaaSApple OSS Distributions /* If we got them all, return to mcache */
990*aca3beaaSApple OSS Distributions if (--need == 0) {
991*aca3beaaSApple OSS Distributions break;
992*aca3beaaSApple OSS Distributions }
993*aca3beaaSApple OSS Distributions }
994*aca3beaaSApple OSS Distributions
995*aca3beaaSApple OSS Distributions return num - need;
996*aca3beaaSApple OSS Distributions }
997*aca3beaaSApple OSS Distributions
998*aca3beaaSApple OSS Distributions /*
999*aca3beaaSApple OSS Distributions * Internal slab deallocator used as a backend for simple caches.
1000*aca3beaaSApple OSS Distributions */
1001*aca3beaaSApple OSS Distributions static void
mcache_slab_free(void * arg,mcache_obj_t * list,__unused boolean_t purged)1002*aca3beaaSApple OSS Distributions mcache_slab_free(void *arg, mcache_obj_t *list, __unused boolean_t purged)
1003*aca3beaaSApple OSS Distributions {
1004*aca3beaaSApple OSS Distributions mcache_t *cp = arg;
1005*aca3beaaSApple OSS Distributions mcache_obj_t *nlist;
1006*aca3beaaSApple OSS Distributions size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
1007*aca3beaaSApple OSS Distributions u_int32_t flags = cp->mc_flags;
1008*aca3beaaSApple OSS Distributions void *base;
1009*aca3beaaSApple OSS Distributions void **pbuf;
1010*aca3beaaSApple OSS Distributions
1011*aca3beaaSApple OSS Distributions for (;;) {
1012*aca3beaaSApple OSS Distributions nlist = list->obj_next;
1013*aca3beaaSApple OSS Distributions list->obj_next = NULL;
1014*aca3beaaSApple OSS Distributions
1015*aca3beaaSApple OSS Distributions base = list;
1016*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(base, cp->mc_align));
1017*aca3beaaSApple OSS Distributions
1018*aca3beaaSApple OSS Distributions /* Get the original address since we're about to free it */
1019*aca3beaaSApple OSS Distributions pbuf = (void **)((intptr_t)base - sizeof(void *));
1020*aca3beaaSApple OSS Distributions
1021*aca3beaaSApple OSS Distributions VERIFY(((intptr_t)base + cp->mc_bufsize) <=
1022*aca3beaaSApple OSS Distributions ((intptr_t)*pbuf + cp->mc_chunksize));
1023*aca3beaaSApple OSS Distributions
1024*aca3beaaSApple OSS Distributions if (flags & MCF_DEBUG) {
1025*aca3beaaSApple OSS Distributions VERIFY(((intptr_t)base + rsize) <=
1026*aca3beaaSApple OSS Distributions ((intptr_t)*pbuf + cp->mc_chunksize));
1027*aca3beaaSApple OSS Distributions mcache_audit_free_verify(NULL, base, 0, rsize);
1028*aca3beaaSApple OSS Distributions }
1029*aca3beaaSApple OSS Distributions
1030*aca3beaaSApple OSS Distributions /* Free it to zone */
1031*aca3beaaSApple OSS Distributions zfree(cp->mc_slab_zone, *pbuf);
1032*aca3beaaSApple OSS Distributions
1033*aca3beaaSApple OSS Distributions /* No more objects to free; return to mcache */
1034*aca3beaaSApple OSS Distributions if ((list = nlist) == NULL) {
1035*aca3beaaSApple OSS Distributions break;
1036*aca3beaaSApple OSS Distributions }
1037*aca3beaaSApple OSS Distributions }
1038*aca3beaaSApple OSS Distributions }
1039*aca3beaaSApple OSS Distributions
1040*aca3beaaSApple OSS Distributions /*
1041*aca3beaaSApple OSS Distributions * Internal slab auditor for simple caches.
1042*aca3beaaSApple OSS Distributions */
1043*aca3beaaSApple OSS Distributions static void
mcache_slab_audit(void * arg,mcache_obj_t * list,boolean_t alloc)1044*aca3beaaSApple OSS Distributions mcache_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
1045*aca3beaaSApple OSS Distributions {
1046*aca3beaaSApple OSS Distributions mcache_t *cp = arg;
1047*aca3beaaSApple OSS Distributions size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
1048*aca3beaaSApple OSS Distributions void *base, **pbuf;
1049*aca3beaaSApple OSS Distributions
1050*aca3beaaSApple OSS Distributions while (list != NULL) {
1051*aca3beaaSApple OSS Distributions mcache_obj_t *next = list->obj_next;
1052*aca3beaaSApple OSS Distributions
1053*aca3beaaSApple OSS Distributions base = list;
1054*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(base, cp->mc_align));
1055*aca3beaaSApple OSS Distributions
1056*aca3beaaSApple OSS Distributions /* Get the original address */
1057*aca3beaaSApple OSS Distributions pbuf = (void **)((intptr_t)base - sizeof(void *));
1058*aca3beaaSApple OSS Distributions
1059*aca3beaaSApple OSS Distributions VERIFY(((intptr_t)base + rsize) <=
1060*aca3beaaSApple OSS Distributions ((intptr_t)*pbuf + cp->mc_chunksize));
1061*aca3beaaSApple OSS Distributions
1062*aca3beaaSApple OSS Distributions if (!alloc) {
1063*aca3beaaSApple OSS Distributions mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
1064*aca3beaaSApple OSS Distributions } else {
1065*aca3beaaSApple OSS Distributions mcache_audit_free_verify_set(NULL, base, 0, rsize);
1066*aca3beaaSApple OSS Distributions }
1067*aca3beaaSApple OSS Distributions
1068*aca3beaaSApple OSS Distributions list = list->obj_next = next;
1069*aca3beaaSApple OSS Distributions }
1070*aca3beaaSApple OSS Distributions }
1071*aca3beaaSApple OSS Distributions
1072*aca3beaaSApple OSS Distributions /*
1073*aca3beaaSApple OSS Distributions * Refill the CPU's buckets with bkt and its follower (if any).
1074*aca3beaaSApple OSS Distributions */
1075*aca3beaaSApple OSS Distributions static void
mcache_cpu_batch_refill(mcache_cpu_t * ccp,mcache_bkt_t * bkt,int objs)1076*aca3beaaSApple OSS Distributions mcache_cpu_batch_refill(mcache_cpu_t *ccp, mcache_bkt_t *bkt, int objs)
1077*aca3beaaSApple OSS Distributions {
1078*aca3beaaSApple OSS Distributions ASSERT((ccp->cc_filled == NULL && ccp->cc_objs == -1) ||
1079*aca3beaaSApple OSS Distributions (ccp->cc_filled && ccp->cc_objs + objs == ccp->cc_bktsize));
1080*aca3beaaSApple OSS Distributions ASSERT(ccp->cc_bktsize > 0);
1081*aca3beaaSApple OSS Distributions
1082*aca3beaaSApple OSS Distributions ccp->cc_filled = bkt;
1083*aca3beaaSApple OSS Distributions ccp->cc_objs = objs;
1084*aca3beaaSApple OSS Distributions if (__probable(bkt->bkt_next != NULL)) {
1085*aca3beaaSApple OSS Distributions ccp->cc_pfilled = bkt->bkt_next;
1086*aca3beaaSApple OSS Distributions ccp->cc_pobjs = objs;
1087*aca3beaaSApple OSS Distributions bkt->bkt_next = NULL;
1088*aca3beaaSApple OSS Distributions } else {
1089*aca3beaaSApple OSS Distributions ASSERT(bkt->bkt_next == NULL);
1090*aca3beaaSApple OSS Distributions ccp->cc_pfilled = NULL;
1091*aca3beaaSApple OSS Distributions ccp->cc_pobjs = -1;
1092*aca3beaaSApple OSS Distributions }
1093*aca3beaaSApple OSS Distributions }
1094*aca3beaaSApple OSS Distributions
1095*aca3beaaSApple OSS Distributions /*
1096*aca3beaaSApple OSS Distributions * Refill the CPU's filled bucket with bkt and save the previous one.
1097*aca3beaaSApple OSS Distributions */
static void
mcache_cpu_refill(mcache_cpu_t *ccp, mcache_bkt_t *bkt, int objs)
{
	/*
	 * Either the CPU layer is completely empty, or the current and
	 * incoming buckets together account for exactly one bucket's
	 * worth of objects.
	 */
	ASSERT((ccp->cc_filled == NULL && ccp->cc_objs == -1) ||
	    (ccp->cc_filled && ccp->cc_objs + objs == ccp->cc_bktsize));
	ASSERT(ccp->cc_bktsize > 0);

	/* Demote the current filled bucket to the previous slot... */
	ccp->cc_pfilled = ccp->cc_filled;
	ccp->cc_pobjs = ccp->cc_objs;
	/* ...and install the incoming bucket with its object count. */
	ccp->cc_filled = bkt;
	ccp->cc_objs = objs;
}
1110*aca3beaaSApple OSS Distributions
1111*aca3beaaSApple OSS Distributions /*
1112*aca3beaaSApple OSS Distributions * Get one or more buckets from the bucket layer.
1113*aca3beaaSApple OSS Distributions */
1114*aca3beaaSApple OSS Distributions static uint32_t
mcache_bkt_batch_alloc(mcache_t * cp,mcache_bktlist_t * blp,mcache_bkt_t ** list,uint32_t num)1115*aca3beaaSApple OSS Distributions mcache_bkt_batch_alloc(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkt_t **list,
1116*aca3beaaSApple OSS Distributions uint32_t num)
1117*aca3beaaSApple OSS Distributions {
1118*aca3beaaSApple OSS Distributions mcache_bkt_t *bkt_list = NULL;
1119*aca3beaaSApple OSS Distributions mcache_bkt_t *bkt;
1120*aca3beaaSApple OSS Distributions uint32_t need = num;
1121*aca3beaaSApple OSS Distributions
1122*aca3beaaSApple OSS Distributions ASSERT(list != NULL && need > 0);
1123*aca3beaaSApple OSS Distributions
1124*aca3beaaSApple OSS Distributions if (!MCACHE_LOCK_TRY(&cp->mc_bkt_lock)) {
1125*aca3beaaSApple OSS Distributions /*
1126*aca3beaaSApple OSS Distributions * The bucket layer lock is held by another CPU; increase
1127*aca3beaaSApple OSS Distributions * the contention count so that we can later resize the
1128*aca3beaaSApple OSS Distributions * bucket size accordingly.
1129*aca3beaaSApple OSS Distributions */
1130*aca3beaaSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1131*aca3beaaSApple OSS Distributions cp->mc_bkt_contention++;
1132*aca3beaaSApple OSS Distributions }
1133*aca3beaaSApple OSS Distributions
1134*aca3beaaSApple OSS Distributions while ((bkt = blp->bl_list) != NULL) {
1135*aca3beaaSApple OSS Distributions blp->bl_list = bkt->bkt_next;
1136*aca3beaaSApple OSS Distributions bkt->bkt_next = bkt_list;
1137*aca3beaaSApple OSS Distributions bkt_list = bkt;
1138*aca3beaaSApple OSS Distributions if (--blp->bl_total < blp->bl_min) {
1139*aca3beaaSApple OSS Distributions blp->bl_min = blp->bl_total;
1140*aca3beaaSApple OSS Distributions }
1141*aca3beaaSApple OSS Distributions blp->bl_alloc++;
1142*aca3beaaSApple OSS Distributions if (--need == 0) {
1143*aca3beaaSApple OSS Distributions break;
1144*aca3beaaSApple OSS Distributions }
1145*aca3beaaSApple OSS Distributions }
1146*aca3beaaSApple OSS Distributions
1147*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1148*aca3beaaSApple OSS Distributions
1149*aca3beaaSApple OSS Distributions *list = bkt_list;
1150*aca3beaaSApple OSS Distributions
1151*aca3beaaSApple OSS Distributions return num - need;
1152*aca3beaaSApple OSS Distributions }
1153*aca3beaaSApple OSS Distributions
1154*aca3beaaSApple OSS Distributions /*
1155*aca3beaaSApple OSS Distributions * Return one or more buckets to the bucket layer.
1156*aca3beaaSApple OSS Distributions */
1157*aca3beaaSApple OSS Distributions static void
mcache_bkt_batch_free(mcache_t * cp,mcache_bktlist_t * blp,mcache_bkt_t * bkt)1158*aca3beaaSApple OSS Distributions mcache_bkt_batch_free(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkt_t *bkt)
1159*aca3beaaSApple OSS Distributions {
1160*aca3beaaSApple OSS Distributions mcache_bkt_t *nbkt;
1161*aca3beaaSApple OSS Distributions
1162*aca3beaaSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1163*aca3beaaSApple OSS Distributions while (bkt != NULL) {
1164*aca3beaaSApple OSS Distributions nbkt = bkt->bkt_next;
1165*aca3beaaSApple OSS Distributions bkt->bkt_next = blp->bl_list;
1166*aca3beaaSApple OSS Distributions blp->bl_list = bkt;
1167*aca3beaaSApple OSS Distributions blp->bl_total++;
1168*aca3beaaSApple OSS Distributions bkt = nbkt;
1169*aca3beaaSApple OSS Distributions }
1170*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1171*aca3beaaSApple OSS Distributions }
1172*aca3beaaSApple OSS Distributions
1173*aca3beaaSApple OSS Distributions /*
1174*aca3beaaSApple OSS Distributions * Enable the bucket layer of a cache.
1175*aca3beaaSApple OSS Distributions */
1176*aca3beaaSApple OSS Distributions static void
mcache_cache_bkt_enable(mcache_t * cp)1177*aca3beaaSApple OSS Distributions mcache_cache_bkt_enable(mcache_t *cp)
1178*aca3beaaSApple OSS Distributions {
1179*aca3beaaSApple OSS Distributions mcache_cpu_t *ccp;
1180*aca3beaaSApple OSS Distributions unsigned int cpu;
1181*aca3beaaSApple OSS Distributions
1182*aca3beaaSApple OSS Distributions if (cp->mc_flags & MCF_NOCPUCACHE) {
1183*aca3beaaSApple OSS Distributions return;
1184*aca3beaaSApple OSS Distributions }
1185*aca3beaaSApple OSS Distributions
1186*aca3beaaSApple OSS Distributions for (cpu = 0; cpu < ncpu; cpu++) {
1187*aca3beaaSApple OSS Distributions ccp = &cp->mc_cpu[cpu];
1188*aca3beaaSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
1189*aca3beaaSApple OSS Distributions ccp->cc_bktsize = cp->cache_bkttype->bt_bktsize;
1190*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
1191*aca3beaaSApple OSS Distributions }
1192*aca3beaaSApple OSS Distributions }
1193*aca3beaaSApple OSS Distributions
1194*aca3beaaSApple OSS Distributions /*
1195*aca3beaaSApple OSS Distributions * Purge all buckets from a cache and disable its bucket layer.
1196*aca3beaaSApple OSS Distributions */
1197*aca3beaaSApple OSS Distributions static void
mcache_bkt_purge(mcache_t * cp)1198*aca3beaaSApple OSS Distributions mcache_bkt_purge(mcache_t *cp)
1199*aca3beaaSApple OSS Distributions {
1200*aca3beaaSApple OSS Distributions mcache_cpu_t *ccp;
1201*aca3beaaSApple OSS Distributions mcache_bkt_t *bp, *pbp;
1202*aca3beaaSApple OSS Distributions int objs, pobjs;
1203*aca3beaaSApple OSS Distributions unsigned int cpu;
1204*aca3beaaSApple OSS Distributions
1205*aca3beaaSApple OSS Distributions for (cpu = 0; cpu < ncpu; cpu++) {
1206*aca3beaaSApple OSS Distributions ccp = &cp->mc_cpu[cpu];
1207*aca3beaaSApple OSS Distributions
1208*aca3beaaSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
1209*aca3beaaSApple OSS Distributions
1210*aca3beaaSApple OSS Distributions bp = ccp->cc_filled;
1211*aca3beaaSApple OSS Distributions pbp = ccp->cc_pfilled;
1212*aca3beaaSApple OSS Distributions objs = ccp->cc_objs;
1213*aca3beaaSApple OSS Distributions pobjs = ccp->cc_pobjs;
1214*aca3beaaSApple OSS Distributions ccp->cc_filled = NULL;
1215*aca3beaaSApple OSS Distributions ccp->cc_pfilled = NULL;
1216*aca3beaaSApple OSS Distributions ccp->cc_objs = -1;
1217*aca3beaaSApple OSS Distributions ccp->cc_pobjs = -1;
1218*aca3beaaSApple OSS Distributions ccp->cc_bktsize = 0;
1219*aca3beaaSApple OSS Distributions
1220*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
1221*aca3beaaSApple OSS Distributions
1222*aca3beaaSApple OSS Distributions if (bp != NULL) {
1223*aca3beaaSApple OSS Distributions mcache_bkt_destroy(cp, bp, objs);
1224*aca3beaaSApple OSS Distributions }
1225*aca3beaaSApple OSS Distributions if (pbp != NULL) {
1226*aca3beaaSApple OSS Distributions mcache_bkt_destroy(cp, pbp, pobjs);
1227*aca3beaaSApple OSS Distributions }
1228*aca3beaaSApple OSS Distributions }
1229*aca3beaaSApple OSS Distributions
1230*aca3beaaSApple OSS Distributions mcache_bkt_ws_zero(cp);
1231*aca3beaaSApple OSS Distributions mcache_bkt_ws_reap(cp);
1232*aca3beaaSApple OSS Distributions }
1233*aca3beaaSApple OSS Distributions
1234*aca3beaaSApple OSS Distributions /*
1235*aca3beaaSApple OSS Distributions * Free one or more objects in the bucket to the slab layer,
1236*aca3beaaSApple OSS Distributions * and also free the bucket itself.
1237*aca3beaaSApple OSS Distributions */
1238*aca3beaaSApple OSS Distributions static void
mcache_bkt_destroy(mcache_t * cp,mcache_bkt_t * bkt,int nobjs)1239*aca3beaaSApple OSS Distributions mcache_bkt_destroy(mcache_t *cp, mcache_bkt_t *bkt, int nobjs)
1240*aca3beaaSApple OSS Distributions {
1241*aca3beaaSApple OSS Distributions if (nobjs > 0) {
1242*aca3beaaSApple OSS Distributions mcache_obj_t *top = bkt->bkt_obj[nobjs - 1];
1243*aca3beaaSApple OSS Distributions
1244*aca3beaaSApple OSS Distributions if (cp->mc_flags & MCF_DEBUG) {
1245*aca3beaaSApple OSS Distributions mcache_obj_t *o = top;
1246*aca3beaaSApple OSS Distributions int cnt = 0;
1247*aca3beaaSApple OSS Distributions
1248*aca3beaaSApple OSS Distributions /*
1249*aca3beaaSApple OSS Distributions * Verify that the chain of objects in the bucket is
1250*aca3beaaSApple OSS Distributions * valid. Any mismatch here means a mistake when the
1251*aca3beaaSApple OSS Distributions * object(s) were freed to the CPU layer, so we panic.
1252*aca3beaaSApple OSS Distributions */
1253*aca3beaaSApple OSS Distributions while (o != NULL) {
1254*aca3beaaSApple OSS Distributions o = o->obj_next;
1255*aca3beaaSApple OSS Distributions ++cnt;
1256*aca3beaaSApple OSS Distributions }
1257*aca3beaaSApple OSS Distributions if (cnt != nobjs) {
1258*aca3beaaSApple OSS Distributions panic("mcache_bkt_destroy: %s cp %p corrupted "
1259*aca3beaaSApple OSS Distributions "list in bkt %p (nobjs %d actual %d)\n",
1260*aca3beaaSApple OSS Distributions cp->mc_name, (void *)cp, (void *)bkt,
1261*aca3beaaSApple OSS Distributions nobjs, cnt);
1262*aca3beaaSApple OSS Distributions /* NOTREACHED */
1263*aca3beaaSApple OSS Distributions __builtin_unreachable();
1264*aca3beaaSApple OSS Distributions }
1265*aca3beaaSApple OSS Distributions }
1266*aca3beaaSApple OSS Distributions
1267*aca3beaaSApple OSS Distributions /* Advise the slab layer to purge the object(s) */
1268*aca3beaaSApple OSS Distributions (*cp->mc_slab_free)(cp->mc_private, top,
1269*aca3beaaSApple OSS Distributions (cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
1270*aca3beaaSApple OSS Distributions }
1271*aca3beaaSApple OSS Distributions mcache_free(bkt->bkt_type->bt_cache, bkt);
1272*aca3beaaSApple OSS Distributions }
1273*aca3beaaSApple OSS Distributions
1274*aca3beaaSApple OSS Distributions /*
1275*aca3beaaSApple OSS Distributions * Update the bucket layer working set statistics.
1276*aca3beaaSApple OSS Distributions */
1277*aca3beaaSApple OSS Distributions static void
mcache_bkt_ws_update(mcache_t * cp)1278*aca3beaaSApple OSS Distributions mcache_bkt_ws_update(mcache_t *cp)
1279*aca3beaaSApple OSS Distributions {
1280*aca3beaaSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1281*aca3beaaSApple OSS Distributions
1282*aca3beaaSApple OSS Distributions cp->mc_full.bl_reaplimit = cp->mc_full.bl_min;
1283*aca3beaaSApple OSS Distributions cp->mc_full.bl_min = cp->mc_full.bl_total;
1284*aca3beaaSApple OSS Distributions cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_min;
1285*aca3beaaSApple OSS Distributions cp->mc_empty.bl_min = cp->mc_empty.bl_total;
1286*aca3beaaSApple OSS Distributions
1287*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1288*aca3beaaSApple OSS Distributions }
1289*aca3beaaSApple OSS Distributions
1290*aca3beaaSApple OSS Distributions /*
1291*aca3beaaSApple OSS Distributions * Mark everything as eligible for reaping (working set is zero).
1292*aca3beaaSApple OSS Distributions */
1293*aca3beaaSApple OSS Distributions static void
mcache_bkt_ws_zero(mcache_t * cp)1294*aca3beaaSApple OSS Distributions mcache_bkt_ws_zero(mcache_t *cp)
1295*aca3beaaSApple OSS Distributions {
1296*aca3beaaSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1297*aca3beaaSApple OSS Distributions
1298*aca3beaaSApple OSS Distributions cp->mc_full.bl_reaplimit = cp->mc_full.bl_total;
1299*aca3beaaSApple OSS Distributions cp->mc_full.bl_min = cp->mc_full.bl_total;
1300*aca3beaaSApple OSS Distributions cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_total;
1301*aca3beaaSApple OSS Distributions cp->mc_empty.bl_min = cp->mc_empty.bl_total;
1302*aca3beaaSApple OSS Distributions
1303*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1304*aca3beaaSApple OSS Distributions }
1305*aca3beaaSApple OSS Distributions
1306*aca3beaaSApple OSS Distributions /*
1307*aca3beaaSApple OSS Distributions * Reap all buckets that are beyond the working set.
1308*aca3beaaSApple OSS Distributions */
1309*aca3beaaSApple OSS Distributions static void
mcache_bkt_ws_reap(mcache_t * cp)1310*aca3beaaSApple OSS Distributions mcache_bkt_ws_reap(mcache_t *cp)
1311*aca3beaaSApple OSS Distributions {
1312*aca3beaaSApple OSS Distributions mcache_bkt_t *bkt, *nbkt;
1313*aca3beaaSApple OSS Distributions uint32_t reap;
1314*aca3beaaSApple OSS Distributions
1315*aca3beaaSApple OSS Distributions reap = MIN(cp->mc_full.bl_reaplimit, cp->mc_full.bl_min);
1316*aca3beaaSApple OSS Distributions if (reap != 0) {
1317*aca3beaaSApple OSS Distributions (void) mcache_bkt_batch_alloc(cp, &cp->mc_full, &bkt, reap);
1318*aca3beaaSApple OSS Distributions while (bkt != NULL) {
1319*aca3beaaSApple OSS Distributions nbkt = bkt->bkt_next;
1320*aca3beaaSApple OSS Distributions bkt->bkt_next = NULL;
1321*aca3beaaSApple OSS Distributions mcache_bkt_destroy(cp, bkt, bkt->bkt_type->bt_bktsize);
1322*aca3beaaSApple OSS Distributions bkt = nbkt;
1323*aca3beaaSApple OSS Distributions }
1324*aca3beaaSApple OSS Distributions }
1325*aca3beaaSApple OSS Distributions
1326*aca3beaaSApple OSS Distributions reap = MIN(cp->mc_empty.bl_reaplimit, cp->mc_empty.bl_min);
1327*aca3beaaSApple OSS Distributions if (reap != 0) {
1328*aca3beaaSApple OSS Distributions (void) mcache_bkt_batch_alloc(cp, &cp->mc_empty, &bkt, reap);
1329*aca3beaaSApple OSS Distributions while (bkt != NULL) {
1330*aca3beaaSApple OSS Distributions nbkt = bkt->bkt_next;
1331*aca3beaaSApple OSS Distributions bkt->bkt_next = NULL;
1332*aca3beaaSApple OSS Distributions mcache_bkt_destroy(cp, bkt, 0);
1333*aca3beaaSApple OSS Distributions bkt = nbkt;
1334*aca3beaaSApple OSS Distributions }
1335*aca3beaaSApple OSS Distributions }
1336*aca3beaaSApple OSS Distributions }
1337*aca3beaaSApple OSS Distributions
static void
mcache_reap_timeout(thread_call_param_t dummy __unused,
    thread_call_param_t arg)
{
	/* Thread-call completion handler for the reap rate-limit timer */
	volatile UInt32 *flag = arg;

	/* The only flag ever scheduled here is the global reaping flag */
	ASSERT(flag == &mcache_reaping);

	/* Clear "reap in progress" so mcache_reap() can start a new cycle */
	*flag = 0;
}
1348*aca3beaaSApple OSS Distributions
static void
mcache_reap_done(void *flag)
{
	uint64_t deadline, leeway;

	/*
	 * Rearm the reap thread call to fire after mcache_reap_interval
	 * seconds (with leeway so the scheduler can coalesce timers).
	 * NOTE(review): mcache_reap_tcall presumably invokes
	 * mcache_reap_timeout to clear the in-progress flag — confirm
	 * against its setup elsewhere in this file.
	 */
	clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
	    &deadline);
	clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
	    NSEC_PER_SEC, &leeway);
	thread_call_enter_delayed_with_leeway(mcache_reap_tcall, flag,
	    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
}
1361*aca3beaaSApple OSS Distributions
static void
mcache_reap_start(void *arg)
{
	/* Dispatched worker: reap every cache, then schedule completion */
	UInt32 *flag = arg;

	ASSERT(flag == &mcache_reaping);

	/* Reap the excess working set of each registered cache */
	mcache_applyall(mcache_cache_reap);
	/* Arrange for the in-progress flag to be cleared after a delay */
	mcache_dispatch(mcache_reap_done, flag);
}
1372*aca3beaaSApple OSS Distributions
__private_extern__ void
mcache_reap(void)
{
	UInt32 *flag = &mcache_reaping;

	/*
	 * Bail if this thread already holds the global cache-list lock
	 * (the dispatched reap presumably takes it via mcache_applyall,
	 * so proceeding could deadlock), or if a reap is already in
	 * flight; the compare-and-swap atomically claims the flag.
	 */
	if (mcache_llock_owner == current_thread() ||
	    !OSCompareAndSwap(0, 1, flag)) {
		return;
	}

	/* Run the reap asynchronously via the dispatch callout */
	mcache_dispatch(mcache_reap_start, flag);
}
1385*aca3beaaSApple OSS Distributions
1386*aca3beaaSApple OSS Distributions __private_extern__ void
mcache_reap_now(mcache_t * cp,boolean_t purge)1387*aca3beaaSApple OSS Distributions mcache_reap_now(mcache_t *cp, boolean_t purge)
1388*aca3beaaSApple OSS Distributions {
1389*aca3beaaSApple OSS Distributions if (purge) {
1390*aca3beaaSApple OSS Distributions mcache_bkt_purge(cp);
1391*aca3beaaSApple OSS Distributions mcache_cache_bkt_enable(cp);
1392*aca3beaaSApple OSS Distributions } else {
1393*aca3beaaSApple OSS Distributions mcache_bkt_ws_zero(cp);
1394*aca3beaaSApple OSS Distributions mcache_bkt_ws_reap(cp);
1395*aca3beaaSApple OSS Distributions }
1396*aca3beaaSApple OSS Distributions }
1397*aca3beaaSApple OSS Distributions
static void
mcache_cache_reap(mcache_t *cp)
{
	/* Release all buckets beyond the cache's working set */
	mcache_bkt_ws_reap(cp);
}
1403*aca3beaaSApple OSS Distributions
/*
 * Performs periodic maintenance on a cache.
 */
1407*aca3beaaSApple OSS Distributions static void
mcache_cache_update(mcache_t * cp)1408*aca3beaaSApple OSS Distributions mcache_cache_update(mcache_t *cp)
1409*aca3beaaSApple OSS Distributions {
1410*aca3beaaSApple OSS Distributions int need_bkt_resize = 0;
1411*aca3beaaSApple OSS Distributions int need_bkt_reenable = 0;
1412*aca3beaaSApple OSS Distributions
1413*aca3beaaSApple OSS Distributions lck_mtx_assert(&mcache_llock, LCK_MTX_ASSERT_OWNED);
1414*aca3beaaSApple OSS Distributions
1415*aca3beaaSApple OSS Distributions mcache_bkt_ws_update(cp);
1416*aca3beaaSApple OSS Distributions
1417*aca3beaaSApple OSS Distributions /*
1418*aca3beaaSApple OSS Distributions * Cache resize and post-purge reenable are mutually exclusive.
1419*aca3beaaSApple OSS Distributions * If the cache was previously purged, there is no point of
1420*aca3beaaSApple OSS Distributions * increasing the bucket size as there was an indication of
1421*aca3beaaSApple OSS Distributions * memory pressure on the system.
1422*aca3beaaSApple OSS Distributions */
1423*aca3beaaSApple OSS Distributions lck_mtx_lock_spin(&cp->mc_sync_lock);
1424*aca3beaaSApple OSS Distributions if (!(cp->mc_flags & MCF_NOCPUCACHE) && cp->mc_enable_cnt) {
1425*aca3beaaSApple OSS Distributions need_bkt_reenable = 1;
1426*aca3beaaSApple OSS Distributions }
1427*aca3beaaSApple OSS Distributions lck_mtx_unlock(&cp->mc_sync_lock);
1428*aca3beaaSApple OSS Distributions
1429*aca3beaaSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1430*aca3beaaSApple OSS Distributions /*
1431*aca3beaaSApple OSS Distributions * If the contention count is greater than the threshold, and if
1432*aca3beaaSApple OSS Distributions * we are not already at the maximum bucket size, increase it.
1433*aca3beaaSApple OSS Distributions * Otherwise, if this cache was previously purged by the user
1434*aca3beaaSApple OSS Distributions * then we simply reenable it.
1435*aca3beaaSApple OSS Distributions */
1436*aca3beaaSApple OSS Distributions if ((unsigned int)cp->mc_chunksize < cp->cache_bkttype->bt_maxbuf &&
1437*aca3beaaSApple OSS Distributions (int)(cp->mc_bkt_contention - cp->mc_bkt_contention_prev) >
1438*aca3beaaSApple OSS Distributions mcache_bkt_contention && !need_bkt_reenable) {
1439*aca3beaaSApple OSS Distributions need_bkt_resize = 1;
1440*aca3beaaSApple OSS Distributions }
1441*aca3beaaSApple OSS Distributions
1442*aca3beaaSApple OSS Distributions cp->mc_bkt_contention_prev = cp->mc_bkt_contention;
1443*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1444*aca3beaaSApple OSS Distributions
1445*aca3beaaSApple OSS Distributions if (need_bkt_resize) {
1446*aca3beaaSApple OSS Distributions mcache_dispatch(mcache_cache_bkt_resize, cp);
1447*aca3beaaSApple OSS Distributions } else if (need_bkt_reenable) {
1448*aca3beaaSApple OSS Distributions mcache_dispatch(mcache_cache_enable, cp);
1449*aca3beaaSApple OSS Distributions }
1450*aca3beaaSApple OSS Distributions }
1451*aca3beaaSApple OSS Distributions
1452*aca3beaaSApple OSS Distributions /*
1453*aca3beaaSApple OSS Distributions * Recompute a cache's bucket size. This is an expensive operation
1454*aca3beaaSApple OSS Distributions * and should not be done frequently; larger buckets provide for a
1455*aca3beaaSApple OSS Distributions * higher transfer rate with the bucket while smaller buckets reduce
1456*aca3beaaSApple OSS Distributions * the memory consumption.
1457*aca3beaaSApple OSS Distributions */
1458*aca3beaaSApple OSS Distributions static void
mcache_cache_bkt_resize(void * arg)1459*aca3beaaSApple OSS Distributions mcache_cache_bkt_resize(void *arg)
1460*aca3beaaSApple OSS Distributions {
1461*aca3beaaSApple OSS Distributions mcache_t *cp = arg;
1462*aca3beaaSApple OSS Distributions mcache_bkttype_t *btp = cp->cache_bkttype;
1463*aca3beaaSApple OSS Distributions
1464*aca3beaaSApple OSS Distributions if ((unsigned int)cp->mc_chunksize < btp->bt_maxbuf) {
1465*aca3beaaSApple OSS Distributions mcache_bkt_purge(cp);
1466*aca3beaaSApple OSS Distributions
1467*aca3beaaSApple OSS Distributions /*
1468*aca3beaaSApple OSS Distributions * Upgrade to the next bucket type with larger bucket size;
1469*aca3beaaSApple OSS Distributions * temporarily set the previous contention snapshot to a
1470*aca3beaaSApple OSS Distributions * negative number to prevent unnecessary resize request.
1471*aca3beaaSApple OSS Distributions */
1472*aca3beaaSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1473*aca3beaaSApple OSS Distributions cp->cache_bkttype = ++btp;
1474*aca3beaaSApple OSS Distributions cp->mc_bkt_contention_prev = cp->mc_bkt_contention + INT_MAX;
1475*aca3beaaSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1476*aca3beaaSApple OSS Distributions
1477*aca3beaaSApple OSS Distributions mcache_cache_enable(cp);
1478*aca3beaaSApple OSS Distributions }
1479*aca3beaaSApple OSS Distributions }
1480*aca3beaaSApple OSS Distributions
1481*aca3beaaSApple OSS Distributions /*
1482*aca3beaaSApple OSS Distributions * Reenable a previously disabled cache due to purge.
1483*aca3beaaSApple OSS Distributions */
static void
mcache_cache_enable(void *arg)
{
	mcache_t *cp = arg;

	/* Reset the purge/enable bookkeeping under the sync lock */
	lck_mtx_lock_spin(&cp->mc_sync_lock);
	cp->mc_purge_cnt = 0;
	cp->mc_enable_cnt = 0;
	lck_mtx_unlock(&cp->mc_sync_lock);

	/* Restore the per-CPU bucket sizes, reactivating the CPU layer */
	mcache_cache_bkt_enable(cp);
}
1496*aca3beaaSApple OSS Distributions
static void
mcache_update_timeout(__unused void *arg)
{
	uint64_t deadline, leeway;

	/*
	 * Schedule the next periodic update pass after
	 * mcache_reap_interval seconds, giving the scheduler some
	 * leeway to coalesce timers and save power.
	 */
	clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
	    &deadline);
	clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
	    NSEC_PER_SEC, &leeway);
	thread_call_enter_delayed_with_leeway(mcache_update_tcall, NULL,
	    deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
}
1509*aca3beaaSApple OSS Distributions
static void
mcache_update(thread_call_param_t arg __unused,
    thread_call_param_t dummy __unused)
{
	/* Run periodic maintenance on every cache, then rearm the timer */
	mcache_applyall(mcache_cache_update);
	mcache_update_timeout(NULL);
}
1517*aca3beaaSApple OSS Distributions
1518*aca3beaaSApple OSS Distributions static void
mcache_applyall(void (* func)(mcache_t *))1519*aca3beaaSApple OSS Distributions mcache_applyall(void (*func)(mcache_t *))
1520*aca3beaaSApple OSS Distributions {
1521*aca3beaaSApple OSS Distributions mcache_t *cp;
1522*aca3beaaSApple OSS Distributions
1523*aca3beaaSApple OSS Distributions MCACHE_LIST_LOCK();
1524*aca3beaaSApple OSS Distributions LIST_FOREACH(cp, &mcache_head, mc_list) {
1525*aca3beaaSApple OSS Distributions func(cp);
1526*aca3beaaSApple OSS Distributions }
1527*aca3beaaSApple OSS Distributions MCACHE_LIST_UNLOCK();
1528*aca3beaaSApple OSS Distributions }
1529*aca3beaaSApple OSS Distributions
/*
 * Run a function asynchronously via the timeout() callout facility.
 */
static void
mcache_dispatch(void (*func)(void *), void *arg)
{
	ASSERT(func != NULL);
	/*
	 * NOTE(review): hz / 1000 truncates to 0 whenever hz < 1000, which
	 * turns this into an "as soon as possible" callout rather than a
	 * ~1ms delay -- presumably intentional; confirm against timeout()
	 * semantics for a zero tick count.
	 */
	timeout(func, arg, hz / 1000);
}
1536*aca3beaaSApple OSS Distributions
/*
 * Record an audit transaction for a buffer: the current thread, a
 * truncated backtrace, and a millisecond timestamp relative to base_ts.
 * Transactions are stored in a small ring (mca_trns) indexed by
 * mca_next_trn, so the most recent mca_trn_max events are retained.
 */
__private_extern__ void
mcache_buffer_log(mcache_audit_t *mca, void *addr, mcache_t *cp,
    struct timeval *base_ts)
{
	/* base defaults to the epoch when no base timestamp is supplied */
	struct timeval now, base = { .tv_sec = 0, .tv_usec = 0 };
	void *stack[MCACHE_STACK_DEPTH + 1];
	struct mca_trn *transaction;

	/* Slot in the ring that this transaction will occupy. */
	transaction = &mca->mca_trns[mca->mca_next_trn];

	mca->mca_addr = addr;
	mca->mca_cache = cp;

	transaction->mca_thread = current_thread();

	/*
	 * Capture one extra frame and then drop it below, so the saved
	 * stack starts at our caller rather than at this function.
	 */
	bzero(stack, sizeof(stack));
	transaction->mca_depth = (uint16_t)OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
	/* Copy frames [1..], skipping this function's own frame. */
	bcopy(&stack[1], transaction->mca_stack,
	    sizeof(transaction->mca_stack));

	microuptime(&now);
	if (base_ts != NULL) {
		base = *base_ts;
	}
	/* tstamp is in ms relative to base_ts */
	transaction->mca_tstamp = ((now.tv_usec - base.tv_usec) / 1000);
	/*
	 * NOTE(review): the seconds component is only added when positive;
	 * if the usec difference is negative with equal seconds the result
	 * can go slightly negative -- presumably acceptable for debugging
	 * timestamps; confirm before relying on monotonicity.
	 */
	if ((now.tv_sec - base.tv_sec) > 0) {
		transaction->mca_tstamp += ((now.tv_sec - base.tv_sec) * 1000);
	}

	/* Advance the ring index for the next transaction. */
	mca->mca_next_trn =
	    (mca->mca_next_trn + 1) % mca_trn_max;
}
1570*aca3beaaSApple OSS Distributions
1571*aca3beaaSApple OSS Distributions /*
1572*aca3beaaSApple OSS Distributions * N.B.: mcache_set_pattern(), mcache_verify_pattern() and
1573*aca3beaaSApple OSS Distributions * mcache_verify_set_pattern() are marked as noinline to prevent the
1574*aca3beaaSApple OSS Distributions * compiler from aliasing pointers when they are inlined inside the callers
1575*aca3beaaSApple OSS Distributions * (e.g. mcache_audit_free_verify_set()) which would be undefined behavior.
1576*aca3beaaSApple OSS Distributions */
1577*aca3beaaSApple OSS Distributions __private_extern__ OS_NOINLINE void
mcache_set_pattern(u_int64_t pattern,void * buf_arg,size_t size)1578*aca3beaaSApple OSS Distributions mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size)
1579*aca3beaaSApple OSS Distributions {
1580*aca3beaaSApple OSS Distributions u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1581*aca3beaaSApple OSS Distributions u_int64_t *buf = (u_int64_t *)buf_arg;
1582*aca3beaaSApple OSS Distributions
1583*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1584*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1585*aca3beaaSApple OSS Distributions
1586*aca3beaaSApple OSS Distributions while (buf < buf_end) {
1587*aca3beaaSApple OSS Distributions *buf++ = pattern;
1588*aca3beaaSApple OSS Distributions }
1589*aca3beaaSApple OSS Distributions }
1590*aca3beaaSApple OSS Distributions
1591*aca3beaaSApple OSS Distributions __private_extern__ OS_NOINLINE void *
mcache_verify_pattern(u_int64_t pattern,void * buf_arg,size_t size)1592*aca3beaaSApple OSS Distributions mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size)
1593*aca3beaaSApple OSS Distributions {
1594*aca3beaaSApple OSS Distributions u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1595*aca3beaaSApple OSS Distributions u_int64_t *buf;
1596*aca3beaaSApple OSS Distributions
1597*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1598*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1599*aca3beaaSApple OSS Distributions
1600*aca3beaaSApple OSS Distributions for (buf = buf_arg; buf < buf_end; buf++) {
1601*aca3beaaSApple OSS Distributions if (*buf != pattern) {
1602*aca3beaaSApple OSS Distributions return buf;
1603*aca3beaaSApple OSS Distributions }
1604*aca3beaaSApple OSS Distributions }
1605*aca3beaaSApple OSS Distributions return NULL;
1606*aca3beaaSApple OSS Distributions }
1607*aca3beaaSApple OSS Distributions
1608*aca3beaaSApple OSS Distributions OS_NOINLINE static void *
mcache_verify_set_pattern(u_int64_t old,u_int64_t new,void * buf_arg,size_t size)1609*aca3beaaSApple OSS Distributions mcache_verify_set_pattern(u_int64_t old, u_int64_t new, void *buf_arg,
1610*aca3beaaSApple OSS Distributions size_t size)
1611*aca3beaaSApple OSS Distributions {
1612*aca3beaaSApple OSS Distributions u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1613*aca3beaaSApple OSS Distributions u_int64_t *buf;
1614*aca3beaaSApple OSS Distributions
1615*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1616*aca3beaaSApple OSS Distributions VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1617*aca3beaaSApple OSS Distributions
1618*aca3beaaSApple OSS Distributions for (buf = buf_arg; buf < buf_end; buf++) {
1619*aca3beaaSApple OSS Distributions if (*buf != old) {
1620*aca3beaaSApple OSS Distributions mcache_set_pattern(old, buf_arg,
1621*aca3beaaSApple OSS Distributions (uintptr_t)buf - (uintptr_t)buf_arg);
1622*aca3beaaSApple OSS Distributions return buf;
1623*aca3beaaSApple OSS Distributions }
1624*aca3beaaSApple OSS Distributions *buf = new;
1625*aca3beaaSApple OSS Distributions }
1626*aca3beaaSApple OSS Distributions return NULL;
1627*aca3beaaSApple OSS Distributions }
1628*aca3beaaSApple OSS Distributions
/*
 * Verify that a freed buffer still contains the free pattern everywhere.
 * The freelist linkage ("obj_next") stored inside the buffer is saved,
 * temporarily overwritten with the pattern so the whole range verifies
 * uniformly, and restored afterwards.  Panics on any corruption.
 */
__private_extern__ void
mcache_audit_free_verify(mcache_audit_t *mca, void *base, size_t offset,
    size_t size)
{
	void *addr;
	u_int64_t *oaddr64;
	mcache_obj_t *next;

	addr = (void *)((uintptr_t)base + offset);
	/* Save the freelist linkage before clobbering it below. */
	next = ((mcache_obj_t *)addr)->obj_next;

	/* For the "obj_next" pointer in the buffer */
	oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t));
	*oaddr64 = MCACHE_FREE_PATTERN;

	/* Any word not matching the pattern means a use-after-free write. */
	if ((oaddr64 = mcache_verify_pattern(MCACHE_FREE_PATTERN,
	    (caddr_t)base, size)) != NULL) {
		mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
		    (int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
		/* NOTREACHED */
	}
	/* Restore the saved freelist linkage. */
	((mcache_obj_t *)addr)->obj_next = next;
}
1652*aca3beaaSApple OSS Distributions
/*
 * Like mcache_audit_free_verify(), but additionally rewrites the buffer
 * with the "uninitialized" pattern in the same pass, preparing it for
 * reallocation.  The freelist linkage ("obj_next") is saved, the range
 * is verified-and-restamped, and the linkage is restored afterwards.
 * Panics on any corruption.
 */
__private_extern__ void
mcache_audit_free_verify_set(mcache_audit_t *mca, void *base, size_t offset,
    size_t size)
{
	void *addr;
	u_int64_t *oaddr64;
	mcache_obj_t *next;

	addr = (void *)((uintptr_t)base + offset);
	/* Save the freelist linkage before clobbering it below. */
	next = ((mcache_obj_t *)addr)->obj_next;

	/* For the "obj_next" pointer in the buffer */
	oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t));
	*oaddr64 = MCACHE_FREE_PATTERN;

	/* Verify the free pattern while replacing it with the uninit one. */
	if ((oaddr64 = mcache_verify_set_pattern(MCACHE_FREE_PATTERN,
	    MCACHE_UNINITIALIZED_PATTERN, (caddr_t)base, size)) != NULL) {
		mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
		    (int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
		/* NOTREACHED */
	}
	/* Restore the saved freelist linkage. */
	((mcache_obj_t *)addr)->obj_next = next;
}
1676*aca3beaaSApple OSS Distributions
/*
 * From here on the real panic() is called directly; drop any macro
 * override of the name that was in effect earlier in this file.
 */
#undef panic

/*
 * printf format for one recorded audit transaction: a label, the thread,
 * the backtrace depth, and 16 saved program counters (two rows of 8).
 */
#define DUMP_TRN_FMT() \
	    "%s transaction thread %p saved PC stack (%d deep):\n" \
	    "\t%p, %p, %p, %p, %p, %p, %p, %p\n" \
	    "\t%p, %p, %p, %p, %p, %p, %p, %p\n"

/*
 * Argument list matching DUMP_TRN_FMT() for transaction slot x of mca,
 * labeled with string s.  Must stay in sync with the format above.
 */
#define DUMP_TRN_FIELDS(s, x) \
	    s, \
	    mca->mca_trns[x].mca_thread, mca->mca_trns[x].mca_depth, \
	    mca->mca_trns[x].mca_stack[0], mca->mca_trns[x].mca_stack[1], \
	    mca->mca_trns[x].mca_stack[2], mca->mca_trns[x].mca_stack[3], \
	    mca->mca_trns[x].mca_stack[4], mca->mca_trns[x].mca_stack[5], \
	    mca->mca_trns[x].mca_stack[6], mca->mca_trns[x].mca_stack[7], \
	    mca->mca_trns[x].mca_stack[8], mca->mca_trns[x].mca_stack[9], \
	    mca->mca_trns[x].mca_stack[10], mca->mca_trns[x].mca_stack[11], \
	    mca->mca_trns[x].mca_stack[12], mca->mca_trns[x].mca_stack[13], \
	    mca->mca_trns[x].mca_stack[14], mca->mca_trns[x].mca_stack[15]

/* Index of the most recently recorded transaction in the mca_trns ring. */
#define MCA_TRN_LAST ((mca->mca_next_trn + mca_trn_max) % mca_trn_max)
/* Index of the transaction recorded before MCA_TRN_LAST. */
#define MCA_TRN_PREV ((mca->mca_next_trn + mca_trn_max - 1) % mca_trn_max)
1698*aca3beaaSApple OSS Distributions
/*
 * Format a human-readable summary of an audit record into buf:
 * the buffer address, owning cache, ring index, and the last two
 * recorded transactions (thread, depth, saved PCs).  Returns buf.
 * buf must be at least DUMP_MCA_BUF_SIZE bytes.
 */
__private_extern__ char *
mcache_dump_mca(char buf[static DUMP_MCA_BUF_SIZE], mcache_audit_t *mca)
{
	snprintf(buf, DUMP_MCA_BUF_SIZE,
	    "mca %p: addr %p, cache %p (%s) nxttrn %d\n"
	    DUMP_TRN_FMT()
	    DUMP_TRN_FMT(),

	    mca, mca->mca_addr, mca->mca_cache,
	    /* cache pointer may be NULL if the record was never logged */
	    mca->mca_cache ? mca->mca_cache->mc_name : "?",
	    mca->mca_next_trn,

	    DUMP_TRN_FIELDS("last", MCA_TRN_LAST),
	    DUMP_TRN_FIELDS("previous", MCA_TRN_PREV));

	return buf;
}
1716*aca3beaaSApple OSS Distributions
1717*aca3beaaSApple OSS Distributions __private_extern__ void
mcache_audit_panic(mcache_audit_t * mca,void * addr,size_t offset,int64_t expected,int64_t got)1718*aca3beaaSApple OSS Distributions mcache_audit_panic(mcache_audit_t *mca, void *addr, size_t offset,
1719*aca3beaaSApple OSS Distributions int64_t expected, int64_t got)
1720*aca3beaaSApple OSS Distributions {
1721*aca3beaaSApple OSS Distributions char buf[DUMP_MCA_BUF_SIZE];
1722*aca3beaaSApple OSS Distributions
1723*aca3beaaSApple OSS Distributions if (mca == NULL) {
1724*aca3beaaSApple OSS Distributions panic("mcache_audit: buffer %p modified after free at "
1725*aca3beaaSApple OSS Distributions "offset 0x%lx (0x%llx instead of 0x%llx)\n", addr,
1726*aca3beaaSApple OSS Distributions offset, got, expected);
1727*aca3beaaSApple OSS Distributions /* NOTREACHED */
1728*aca3beaaSApple OSS Distributions __builtin_unreachable();
1729*aca3beaaSApple OSS Distributions }
1730*aca3beaaSApple OSS Distributions
1731*aca3beaaSApple OSS Distributions panic("mcache_audit: buffer %p modified after free at offset 0x%lx "
1732*aca3beaaSApple OSS Distributions "(0x%llx instead of 0x%llx)\n%s\n",
1733*aca3beaaSApple OSS Distributions addr, offset, got, expected, mcache_dump_mca(buf, mca));
1734*aca3beaaSApple OSS Distributions /* NOTREACHED */
1735*aca3beaaSApple OSS Distributions __builtin_unreachable();
1736*aca3beaaSApple OSS Distributions }
1737*aca3beaaSApple OSS Distributions
/*
 * Assertion-failure handler: panic with the failed expression text and
 * its source location.  Never returns; the int return type exists only
 * so the ASSERT/VERIFY macros can use this in expression position.
 */
__attribute__((noinline, cold, not_tail_called, noreturn))
__private_extern__ int
assfail(const char *a, const char *f, int l)
{
	panic("assertion failed: %s, file: %s, line: %d", a, f, l);
	/* NOTREACHED */
	__builtin_unreachable();
}
1746