1*2c2f96dcSApple OSS Distributions /*
2*2c2f96dcSApple OSS Distributions * Copyright (c) 2006-2020 Apple Inc. All rights reserved.
3*2c2f96dcSApple OSS Distributions *
4*2c2f96dcSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*2c2f96dcSApple OSS Distributions *
6*2c2f96dcSApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*2c2f96dcSApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*2c2f96dcSApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*2c2f96dcSApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*2c2f96dcSApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*2c2f96dcSApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*2c2f96dcSApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*2c2f96dcSApple OSS Distributions * terms of an Apple operating system software license agreement.
14*2c2f96dcSApple OSS Distributions *
15*2c2f96dcSApple OSS Distributions * Please obtain a copy of the License at
16*2c2f96dcSApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*2c2f96dcSApple OSS Distributions *
18*2c2f96dcSApple OSS Distributions * The Original Code and all software distributed under the License are
19*2c2f96dcSApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*2c2f96dcSApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*2c2f96dcSApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*2c2f96dcSApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*2c2f96dcSApple OSS Distributions * Please see the License for the specific language governing rights and
24*2c2f96dcSApple OSS Distributions * limitations under the License.
25*2c2f96dcSApple OSS Distributions *
26*2c2f96dcSApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*2c2f96dcSApple OSS Distributions */
28*2c2f96dcSApple OSS Distributions
29*2c2f96dcSApple OSS Distributions /*
30*2c2f96dcSApple OSS Distributions * Memory allocator with per-CPU caching, derived from the kmem magazine
31*2c2f96dcSApple OSS Distributions * concept and implementation as described in the following paper:
32*2c2f96dcSApple OSS Distributions * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick.pdf
33*2c2f96dcSApple OSS Distributions * That implementation is Copyright 2006 Sun Microsystems, Inc. All rights
34*2c2f96dcSApple OSS Distributions * reserved. Use is subject to license terms.
35*2c2f96dcSApple OSS Distributions *
36*2c2f96dcSApple OSS Distributions * There are several major differences between this and the original kmem
37*2c2f96dcSApple OSS Distributions * magazine: this derivative implementation allows for multiple objects to
38*2c2f96dcSApple OSS Distributions * be allocated and freed from/to the object cache in one call; in addition,
39*2c2f96dcSApple OSS Distributions * it provides for better flexibility where the user is allowed to define
40*2c2f96dcSApple OSS Distributions * its own slab allocator (instead of the default zone allocator). Finally,
41*2c2f96dcSApple OSS Distributions * no object construction/destruction takes place at the moment, although
42*2c2f96dcSApple OSS Distributions * this could be added in future to improve efficiency.
43*2c2f96dcSApple OSS Distributions */
44*2c2f96dcSApple OSS Distributions
45*2c2f96dcSApple OSS Distributions #include <sys/param.h>
46*2c2f96dcSApple OSS Distributions #include <sys/types.h>
47*2c2f96dcSApple OSS Distributions #include <sys/malloc.h>
48*2c2f96dcSApple OSS Distributions #include <sys/mbuf.h>
49*2c2f96dcSApple OSS Distributions #include <sys/queue.h>
50*2c2f96dcSApple OSS Distributions #include <sys/kernel.h>
51*2c2f96dcSApple OSS Distributions #include <sys/systm.h>
52*2c2f96dcSApple OSS Distributions
53*2c2f96dcSApple OSS Distributions #include <kern/debug.h>
54*2c2f96dcSApple OSS Distributions #include <kern/zalloc.h>
55*2c2f96dcSApple OSS Distributions #include <kern/cpu_number.h>
56*2c2f96dcSApple OSS Distributions #include <kern/locks.h>
57*2c2f96dcSApple OSS Distributions #include <kern/thread_call.h>
58*2c2f96dcSApple OSS Distributions
59*2c2f96dcSApple OSS Distributions #include <libkern/libkern.h>
60*2c2f96dcSApple OSS Distributions #include <libkern/OSAtomic.h>
61*2c2f96dcSApple OSS Distributions #include <libkern/OSDebug.h>
62*2c2f96dcSApple OSS Distributions
63*2c2f96dcSApple OSS Distributions #include <mach/vm_param.h>
64*2c2f96dcSApple OSS Distributions #include <machine/limits.h>
65*2c2f96dcSApple OSS Distributions #include <machine/machine_routines.h>
66*2c2f96dcSApple OSS Distributions
67*2c2f96dcSApple OSS Distributions #include <os/atomic_private.h>
68*2c2f96dcSApple OSS Distributions
69*2c2f96dcSApple OSS Distributions #include <string.h>
70*2c2f96dcSApple OSS Distributions
71*2c2f96dcSApple OSS Distributions #include <sys/mcache.h>
72*2c2f96dcSApple OSS Distributions
73*2c2f96dcSApple OSS Distributions #define MCACHE_SIZE(n) \
74*2c2f96dcSApple OSS Distributions __builtin_offsetof(mcache_t, mc_cpu[n])
75*2c2f96dcSApple OSS Distributions
76*2c2f96dcSApple OSS Distributions /* Allocate extra in case we need to manually align the pointer */
77*2c2f96dcSApple OSS Distributions #define MCACHE_ALLOC_SIZE \
78*2c2f96dcSApple OSS Distributions (sizeof (void *) + MCACHE_SIZE(ncpu) + CPU_CACHE_LINE_SIZE)
79*2c2f96dcSApple OSS Distributions
80*2c2f96dcSApple OSS Distributions #define MCACHE_CPU(c) \
81*2c2f96dcSApple OSS Distributions (mcache_cpu_t *)((void *)((char *)(c) + MCACHE_SIZE(cpu_number())))
82*2c2f96dcSApple OSS Distributions
83*2c2f96dcSApple OSS Distributions /*
84*2c2f96dcSApple OSS Distributions * MCACHE_LIST_LOCK() and MCACHE_LIST_UNLOCK() are macros used
85*2c2f96dcSApple OSS Distributions * to serialize accesses to the global list of caches in the system.
86*2c2f96dcSApple OSS Distributions * They also record the thread currently running in the critical
87*2c2f96dcSApple OSS Distributions * section, so that we can avoid recursive requests to reap the
88*2c2f96dcSApple OSS Distributions * caches when memory runs low.
89*2c2f96dcSApple OSS Distributions */
90*2c2f96dcSApple OSS Distributions #define MCACHE_LIST_LOCK() { \
91*2c2f96dcSApple OSS Distributions lck_mtx_lock(&mcache_llock); \
92*2c2f96dcSApple OSS Distributions mcache_llock_owner = current_thread(); \
93*2c2f96dcSApple OSS Distributions }
94*2c2f96dcSApple OSS Distributions
95*2c2f96dcSApple OSS Distributions #define MCACHE_LIST_UNLOCK() { \
96*2c2f96dcSApple OSS Distributions mcache_llock_owner = NULL; \
97*2c2f96dcSApple OSS Distributions lck_mtx_unlock(&mcache_llock); \
98*2c2f96dcSApple OSS Distributions }
99*2c2f96dcSApple OSS Distributions
100*2c2f96dcSApple OSS Distributions #define MCACHE_LOCK(l) lck_mtx_lock(l)
101*2c2f96dcSApple OSS Distributions #define MCACHE_UNLOCK(l) lck_mtx_unlock(l)
102*2c2f96dcSApple OSS Distributions #define MCACHE_LOCK_TRY(l) lck_mtx_try_lock(l)
103*2c2f96dcSApple OSS Distributions
104*2c2f96dcSApple OSS Distributions static unsigned int ncpu;
105*2c2f96dcSApple OSS Distributions static unsigned int cache_line_size;
106*2c2f96dcSApple OSS Distributions static struct thread *mcache_llock_owner;
107*2c2f96dcSApple OSS Distributions static LCK_GRP_DECLARE(mcache_llock_grp, "mcache.list");
108*2c2f96dcSApple OSS Distributions static LCK_MTX_DECLARE(mcache_llock, &mcache_llock_grp);
109*2c2f96dcSApple OSS Distributions static struct zone *mcache_zone;
110*2c2f96dcSApple OSS Distributions static const uint32_t mcache_reap_interval = 15;
111*2c2f96dcSApple OSS Distributions static const uint32_t mcache_reap_interval_leeway = 2;
112*2c2f96dcSApple OSS Distributions static UInt32 mcache_reaping;
113*2c2f96dcSApple OSS Distributions static int mcache_ready;
114*2c2f96dcSApple OSS Distributions static int mcache_updating;
115*2c2f96dcSApple OSS Distributions
116*2c2f96dcSApple OSS Distributions static int mcache_bkt_contention = 3;
117*2c2f96dcSApple OSS Distributions #if DEBUG
118*2c2f96dcSApple OSS Distributions static unsigned int mcache_flags = MCF_DEBUG;
119*2c2f96dcSApple OSS Distributions #else
120*2c2f96dcSApple OSS Distributions static unsigned int mcache_flags = 0;
121*2c2f96dcSApple OSS Distributions #endif
122*2c2f96dcSApple OSS Distributions
123*2c2f96dcSApple OSS Distributions int mca_trn_max = MCA_TRN_MAX;
124*2c2f96dcSApple OSS Distributions
125*2c2f96dcSApple OSS Distributions static mcache_bkttype_t mcache_bkttype[] = {
126*2c2f96dcSApple OSS Distributions { 1, 4096, 32768, NULL },
127*2c2f96dcSApple OSS Distributions { 3, 2048, 16384, NULL },
128*2c2f96dcSApple OSS Distributions { 7, 1024, 12288, NULL },
129*2c2f96dcSApple OSS Distributions { 15, 256, 8192, NULL },
130*2c2f96dcSApple OSS Distributions { 31, 64, 4096, NULL },
131*2c2f96dcSApple OSS Distributions { 47, 0, 2048, NULL },
132*2c2f96dcSApple OSS Distributions { 63, 0, 1024, NULL },
133*2c2f96dcSApple OSS Distributions { 95, 0, 512, NULL },
134*2c2f96dcSApple OSS Distributions { 143, 0, 256, NULL },
135*2c2f96dcSApple OSS Distributions { 165, 0, 0, NULL },
136*2c2f96dcSApple OSS Distributions };
137*2c2f96dcSApple OSS Distributions
138*2c2f96dcSApple OSS Distributions static mcache_t *mcache_create_common(const char *, size_t, size_t,
139*2c2f96dcSApple OSS Distributions mcache_allocfn_t, mcache_freefn_t, mcache_auditfn_t, mcache_logfn_t,
140*2c2f96dcSApple OSS Distributions mcache_notifyfn_t, void *, u_int32_t, int);
141*2c2f96dcSApple OSS Distributions static unsigned int mcache_slab_alloc(void *, mcache_obj_t ***,
142*2c2f96dcSApple OSS Distributions unsigned int, int);
143*2c2f96dcSApple OSS Distributions static void mcache_slab_free(void *, mcache_obj_t *, boolean_t);
144*2c2f96dcSApple OSS Distributions static void mcache_slab_audit(void *, mcache_obj_t *, boolean_t);
145*2c2f96dcSApple OSS Distributions static void mcache_cpu_refill(mcache_cpu_t *, mcache_bkt_t *, int);
146*2c2f96dcSApple OSS Distributions static void mcache_cpu_batch_refill(mcache_cpu_t *, mcache_bkt_t *, int);
147*2c2f96dcSApple OSS Distributions static uint32_t mcache_bkt_batch_alloc(mcache_t *, mcache_bktlist_t *,
148*2c2f96dcSApple OSS Distributions mcache_bkt_t **, uint32_t);
149*2c2f96dcSApple OSS Distributions static void mcache_bkt_batch_free(mcache_t *, mcache_bktlist_t *, mcache_bkt_t *);
150*2c2f96dcSApple OSS Distributions static void mcache_cache_bkt_enable(mcache_t *);
151*2c2f96dcSApple OSS Distributions static void mcache_bkt_purge(mcache_t *);
152*2c2f96dcSApple OSS Distributions static void mcache_bkt_destroy(mcache_t *, mcache_bkt_t *, int);
153*2c2f96dcSApple OSS Distributions static void mcache_bkt_ws_update(mcache_t *);
154*2c2f96dcSApple OSS Distributions static void mcache_bkt_ws_zero(mcache_t *);
155*2c2f96dcSApple OSS Distributions static void mcache_bkt_ws_reap(mcache_t *);
156*2c2f96dcSApple OSS Distributions static void mcache_dispatch(void (*)(void *), void *);
157*2c2f96dcSApple OSS Distributions static void mcache_cache_reap(mcache_t *);
158*2c2f96dcSApple OSS Distributions static void mcache_cache_update(mcache_t *);
159*2c2f96dcSApple OSS Distributions static void mcache_cache_bkt_resize(void *);
160*2c2f96dcSApple OSS Distributions static void mcache_cache_enable(void *);
161*2c2f96dcSApple OSS Distributions static void mcache_update(thread_call_param_t __unused, thread_call_param_t __unused);
162*2c2f96dcSApple OSS Distributions static void mcache_update_timeout(void *);
163*2c2f96dcSApple OSS Distributions static void mcache_applyall(void (*)(mcache_t *));
164*2c2f96dcSApple OSS Distributions static void mcache_reap_start(void *);
165*2c2f96dcSApple OSS Distributions static void mcache_reap_done(void *);
166*2c2f96dcSApple OSS Distributions static void mcache_reap_timeout(thread_call_param_t __unused, thread_call_param_t);
167*2c2f96dcSApple OSS Distributions static void mcache_notify(mcache_t *, u_int32_t);
168*2c2f96dcSApple OSS Distributions static void mcache_purge(void *);
169*2c2f96dcSApple OSS Distributions __attribute__((noreturn))
170*2c2f96dcSApple OSS Distributions static void mcache_audit_panic(mcache_audit_t *mca, void *addr, size_t offset,
171*2c2f96dcSApple OSS Distributions int64_t expected, int64_t got);
172*2c2f96dcSApple OSS Distributions
173*2c2f96dcSApple OSS Distributions static LIST_HEAD(, mcache) mcache_head;
174*2c2f96dcSApple OSS Distributions mcache_t *mcache_audit_cache;
175*2c2f96dcSApple OSS Distributions
176*2c2f96dcSApple OSS Distributions static thread_call_t mcache_reap_tcall;
177*2c2f96dcSApple OSS Distributions static thread_call_t mcache_update_tcall;
178*2c2f96dcSApple OSS Distributions
179*2c2f96dcSApple OSS Distributions /*
180*2c2f96dcSApple OSS Distributions * Initialize the framework; this is currently called as part of BSD init.
181*2c2f96dcSApple OSS Distributions */
182*2c2f96dcSApple OSS Distributions __private_extern__ void
mcache_init(void)183*2c2f96dcSApple OSS Distributions mcache_init(void)
184*2c2f96dcSApple OSS Distributions {
185*2c2f96dcSApple OSS Distributions mcache_bkttype_t *btp;
186*2c2f96dcSApple OSS Distributions unsigned int i;
187*2c2f96dcSApple OSS Distributions char name[32];
188*2c2f96dcSApple OSS Distributions
189*2c2f96dcSApple OSS Distributions VERIFY(mca_trn_max >= 2);
190*2c2f96dcSApple OSS Distributions
191*2c2f96dcSApple OSS Distributions ncpu = ml_wait_max_cpus();
192*2c2f96dcSApple OSS Distributions (void) mcache_cache_line_size(); /* prime it */
193*2c2f96dcSApple OSS Distributions
194*2c2f96dcSApple OSS Distributions mcache_reap_tcall = thread_call_allocate(mcache_reap_timeout, NULL);
195*2c2f96dcSApple OSS Distributions mcache_update_tcall = thread_call_allocate(mcache_update, NULL);
196*2c2f96dcSApple OSS Distributions if (mcache_reap_tcall == NULL || mcache_update_tcall == NULL) {
197*2c2f96dcSApple OSS Distributions panic("mcache_init: thread_call_allocate failed");
198*2c2f96dcSApple OSS Distributions /* NOTREACHED */
199*2c2f96dcSApple OSS Distributions __builtin_unreachable();
200*2c2f96dcSApple OSS Distributions }
201*2c2f96dcSApple OSS Distributions
202*2c2f96dcSApple OSS Distributions mcache_zone = zone_create("mcache", MCACHE_ALLOC_SIZE,
203*2c2f96dcSApple OSS Distributions ZC_PGZ_USE_GUARDS | ZC_DESTRUCTIBLE);
204*2c2f96dcSApple OSS Distributions
205*2c2f96dcSApple OSS Distributions LIST_INIT(&mcache_head);
206*2c2f96dcSApple OSS Distributions
207*2c2f96dcSApple OSS Distributions for (i = 0; i < sizeof(mcache_bkttype) / sizeof(*btp); i++) {
208*2c2f96dcSApple OSS Distributions btp = &mcache_bkttype[i];
209*2c2f96dcSApple OSS Distributions (void) snprintf(name, sizeof(name), "bkt_%d",
210*2c2f96dcSApple OSS Distributions btp->bt_bktsize);
211*2c2f96dcSApple OSS Distributions btp->bt_cache = mcache_create(name,
212*2c2f96dcSApple OSS Distributions (btp->bt_bktsize + 1) * sizeof(void *), 0, 0, MCR_SLEEP);
213*2c2f96dcSApple OSS Distributions }
214*2c2f96dcSApple OSS Distributions
215*2c2f96dcSApple OSS Distributions PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof(mcache_flags));
216*2c2f96dcSApple OSS Distributions mcache_flags &= MCF_FLAGS_MASK;
217*2c2f96dcSApple OSS Distributions
218*2c2f96dcSApple OSS Distributions mcache_audit_cache = mcache_create("audit", sizeof(mcache_audit_t),
219*2c2f96dcSApple OSS Distributions 0, 0, MCR_SLEEP);
220*2c2f96dcSApple OSS Distributions
221*2c2f96dcSApple OSS Distributions mcache_applyall(mcache_cache_bkt_enable);
222*2c2f96dcSApple OSS Distributions mcache_ready = 1;
223*2c2f96dcSApple OSS Distributions
224*2c2f96dcSApple OSS Distributions printf("mcache: %d CPU(s), %d bytes CPU cache line size\n",
225*2c2f96dcSApple OSS Distributions ncpu, CPU_CACHE_LINE_SIZE);
226*2c2f96dcSApple OSS Distributions }
227*2c2f96dcSApple OSS Distributions
228*2c2f96dcSApple OSS Distributions /*
229*2c2f96dcSApple OSS Distributions * Return the global mcache flags.
230*2c2f96dcSApple OSS Distributions */
231*2c2f96dcSApple OSS Distributions __private_extern__ unsigned int
mcache_getflags(void)232*2c2f96dcSApple OSS Distributions mcache_getflags(void)
233*2c2f96dcSApple OSS Distributions {
234*2c2f96dcSApple OSS Distributions return mcache_flags;
235*2c2f96dcSApple OSS Distributions }
236*2c2f96dcSApple OSS Distributions
237*2c2f96dcSApple OSS Distributions /*
238*2c2f96dcSApple OSS Distributions * Return the CPU cache line size.
239*2c2f96dcSApple OSS Distributions */
240*2c2f96dcSApple OSS Distributions __private_extern__ unsigned int
mcache_cache_line_size(void)241*2c2f96dcSApple OSS Distributions mcache_cache_line_size(void)
242*2c2f96dcSApple OSS Distributions {
243*2c2f96dcSApple OSS Distributions if (cache_line_size == 0) {
244*2c2f96dcSApple OSS Distributions ml_cpu_info_t cpu_info;
245*2c2f96dcSApple OSS Distributions ml_cpu_get_info(&cpu_info);
246*2c2f96dcSApple OSS Distributions cache_line_size = (unsigned int)cpu_info.cache_line_size;
247*2c2f96dcSApple OSS Distributions }
248*2c2f96dcSApple OSS Distributions return cache_line_size;
249*2c2f96dcSApple OSS Distributions }
250*2c2f96dcSApple OSS Distributions
251*2c2f96dcSApple OSS Distributions /*
252*2c2f96dcSApple OSS Distributions * Create a cache using the zone allocator as the backend slab allocator.
253*2c2f96dcSApple OSS Distributions * The caller may specify any alignment for the object; if it specifies 0
254*2c2f96dcSApple OSS Distributions * the default alignment (MCACHE_ALIGN) will be used.
255*2c2f96dcSApple OSS Distributions */
256*2c2f96dcSApple OSS Distributions __private_extern__ mcache_t *
mcache_create(const char * name,size_t bufsize,size_t align,u_int32_t flags,int wait __unused)257*2c2f96dcSApple OSS Distributions mcache_create(const char *name, size_t bufsize, size_t align,
258*2c2f96dcSApple OSS Distributions u_int32_t flags, int wait __unused)
259*2c2f96dcSApple OSS Distributions {
260*2c2f96dcSApple OSS Distributions return mcache_create_common(name, bufsize, align, mcache_slab_alloc,
261*2c2f96dcSApple OSS Distributions mcache_slab_free, mcache_slab_audit, NULL, NULL, NULL, flags, 1);
262*2c2f96dcSApple OSS Distributions }
263*2c2f96dcSApple OSS Distributions
264*2c2f96dcSApple OSS Distributions /*
265*2c2f96dcSApple OSS Distributions * Create a cache using a custom backend slab allocator. Since the caller
266*2c2f96dcSApple OSS Distributions * is responsible for allocation, no alignment guarantee will be provided
267*2c2f96dcSApple OSS Distributions * by this framework.
268*2c2f96dcSApple OSS Distributions */
269*2c2f96dcSApple OSS Distributions __private_extern__ mcache_t *
mcache_create_ext(const char * name,size_t bufsize,mcache_allocfn_t allocfn,mcache_freefn_t freefn,mcache_auditfn_t auditfn,mcache_logfn_t logfn,mcache_notifyfn_t notifyfn,void * arg,u_int32_t flags,int wait __unused)270*2c2f96dcSApple OSS Distributions mcache_create_ext(const char *name, size_t bufsize,
271*2c2f96dcSApple OSS Distributions mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
272*2c2f96dcSApple OSS Distributions mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
273*2c2f96dcSApple OSS Distributions u_int32_t flags, int wait __unused)
274*2c2f96dcSApple OSS Distributions {
275*2c2f96dcSApple OSS Distributions return mcache_create_common(name, bufsize, 0, allocfn,
276*2c2f96dcSApple OSS Distributions freefn, auditfn, logfn, notifyfn, arg, flags, 0);
277*2c2f96dcSApple OSS Distributions }
278*2c2f96dcSApple OSS Distributions
279*2c2f96dcSApple OSS Distributions /*
280*2c2f96dcSApple OSS Distributions * Common cache creation routine.
281*2c2f96dcSApple OSS Distributions */
282*2c2f96dcSApple OSS Distributions static mcache_t *
mcache_create_common(const char * name,size_t bufsize,size_t align,mcache_allocfn_t allocfn,mcache_freefn_t freefn,mcache_auditfn_t auditfn,mcache_logfn_t logfn,mcache_notifyfn_t notifyfn,void * arg,u_int32_t flags,int need_zone)283*2c2f96dcSApple OSS Distributions mcache_create_common(const char *name, size_t bufsize, size_t align,
284*2c2f96dcSApple OSS Distributions mcache_allocfn_t allocfn, mcache_freefn_t freefn, mcache_auditfn_t auditfn,
285*2c2f96dcSApple OSS Distributions mcache_logfn_t logfn, mcache_notifyfn_t notifyfn, void *arg,
286*2c2f96dcSApple OSS Distributions u_int32_t flags, int need_zone)
287*2c2f96dcSApple OSS Distributions {
288*2c2f96dcSApple OSS Distributions mcache_bkttype_t *btp;
289*2c2f96dcSApple OSS Distributions mcache_t *cp = NULL;
290*2c2f96dcSApple OSS Distributions size_t chunksize;
291*2c2f96dcSApple OSS Distributions void *buf, **pbuf;
292*2c2f96dcSApple OSS Distributions unsigned int c;
293*2c2f96dcSApple OSS Distributions char lck_name[64];
294*2c2f96dcSApple OSS Distributions
295*2c2f96dcSApple OSS Distributions buf = zalloc_flags(mcache_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
296*2c2f96dcSApple OSS Distributions
297*2c2f96dcSApple OSS Distributions /*
298*2c2f96dcSApple OSS Distributions * In case we didn't get a cache-aligned memory, round it up
299*2c2f96dcSApple OSS Distributions * accordingly. This is needed in order to get the rest of
300*2c2f96dcSApple OSS Distributions * structure members aligned properly. It also means that
301*2c2f96dcSApple OSS Distributions * the memory span gets shifted due to the round up, but it
302*2c2f96dcSApple OSS Distributions * is okay since we've allocated extra space for this.
303*2c2f96dcSApple OSS Distributions */
304*2c2f96dcSApple OSS Distributions cp = (mcache_t *)
305*2c2f96dcSApple OSS Distributions P2ROUNDUP((intptr_t)buf + sizeof(void *), CPU_CACHE_LINE_SIZE);
306*2c2f96dcSApple OSS Distributions pbuf = (void **)((intptr_t)cp - sizeof(void *));
307*2c2f96dcSApple OSS Distributions *pbuf = buf;
308*2c2f96dcSApple OSS Distributions
309*2c2f96dcSApple OSS Distributions /*
310*2c2f96dcSApple OSS Distributions * Guaranteed alignment is valid only when we use the internal
311*2c2f96dcSApple OSS Distributions * slab allocator (currently set to use the zone allocator).
312*2c2f96dcSApple OSS Distributions */
313*2c2f96dcSApple OSS Distributions if (!need_zone) {
314*2c2f96dcSApple OSS Distributions align = 1;
315*2c2f96dcSApple OSS Distributions } else {
316*2c2f96dcSApple OSS Distributions /* Enforce 64-bit minimum alignment for zone-based buffers */
317*2c2f96dcSApple OSS Distributions if (align == 0) {
318*2c2f96dcSApple OSS Distributions align = MCACHE_ALIGN;
319*2c2f96dcSApple OSS Distributions }
320*2c2f96dcSApple OSS Distributions align = P2ROUNDUP(align, MCACHE_ALIGN);
321*2c2f96dcSApple OSS Distributions }
322*2c2f96dcSApple OSS Distributions
323*2c2f96dcSApple OSS Distributions if ((align & (align - 1)) != 0) {
324*2c2f96dcSApple OSS Distributions panic("mcache_create: bad alignment %lu", align);
325*2c2f96dcSApple OSS Distributions /* NOTREACHED */
326*2c2f96dcSApple OSS Distributions __builtin_unreachable();
327*2c2f96dcSApple OSS Distributions }
328*2c2f96dcSApple OSS Distributions
329*2c2f96dcSApple OSS Distributions cp->mc_align = align;
330*2c2f96dcSApple OSS Distributions cp->mc_slab_alloc = allocfn;
331*2c2f96dcSApple OSS Distributions cp->mc_slab_free = freefn;
332*2c2f96dcSApple OSS Distributions cp->mc_slab_audit = auditfn;
333*2c2f96dcSApple OSS Distributions cp->mc_slab_log = logfn;
334*2c2f96dcSApple OSS Distributions cp->mc_slab_notify = notifyfn;
335*2c2f96dcSApple OSS Distributions cp->mc_private = need_zone ? cp : arg;
336*2c2f96dcSApple OSS Distributions cp->mc_bufsize = bufsize;
337*2c2f96dcSApple OSS Distributions cp->mc_flags = (flags & MCF_FLAGS_MASK) | mcache_flags;
338*2c2f96dcSApple OSS Distributions
339*2c2f96dcSApple OSS Distributions (void) snprintf(cp->mc_name, sizeof(cp->mc_name), "mcache.%s", name);
340*2c2f96dcSApple OSS Distributions
341*2c2f96dcSApple OSS Distributions (void) snprintf(lck_name, sizeof(lck_name), "%s.cpu", cp->mc_name);
342*2c2f96dcSApple OSS Distributions cp->mc_cpu_lock_grp = lck_grp_alloc_init(lck_name, LCK_GRP_ATTR_NULL);
343*2c2f96dcSApple OSS Distributions
344*2c2f96dcSApple OSS Distributions /*
345*2c2f96dcSApple OSS Distributions * Allocation chunk size is the object's size plus any extra size
346*2c2f96dcSApple OSS Distributions * needed to satisfy the object's alignment. It is enforced to be
347*2c2f96dcSApple OSS Distributions * at least the size of an LP64 pointer to simplify auditing and to
348*2c2f96dcSApple OSS Distributions * handle multiple-element allocation requests, where the elements
349*2c2f96dcSApple OSS Distributions * returned are linked together in a list.
350*2c2f96dcSApple OSS Distributions */
351*2c2f96dcSApple OSS Distributions chunksize = MAX(bufsize, sizeof(u_int64_t));
352*2c2f96dcSApple OSS Distributions if (need_zone) {
353*2c2f96dcSApple OSS Distributions VERIFY(align != 0 && (align % MCACHE_ALIGN) == 0);
354*2c2f96dcSApple OSS Distributions chunksize += sizeof(uint64_t) + align;
355*2c2f96dcSApple OSS Distributions chunksize = P2ROUNDUP(chunksize, align);
356*2c2f96dcSApple OSS Distributions cp->mc_slab_zone = zone_create(cp->mc_name, chunksize,
357*2c2f96dcSApple OSS Distributions ZC_PGZ_USE_GUARDS | ZC_DESTRUCTIBLE);
358*2c2f96dcSApple OSS Distributions }
359*2c2f96dcSApple OSS Distributions cp->mc_chunksize = chunksize;
360*2c2f96dcSApple OSS Distributions
361*2c2f96dcSApple OSS Distributions /*
362*2c2f96dcSApple OSS Distributions * Initialize the bucket layer.
363*2c2f96dcSApple OSS Distributions */
364*2c2f96dcSApple OSS Distributions (void) snprintf(lck_name, sizeof(lck_name), "%s.bkt", cp->mc_name);
365*2c2f96dcSApple OSS Distributions cp->mc_bkt_lock_grp = lck_grp_alloc_init(lck_name,
366*2c2f96dcSApple OSS Distributions LCK_GRP_ATTR_NULL);
367*2c2f96dcSApple OSS Distributions lck_mtx_init(&cp->mc_bkt_lock, cp->mc_bkt_lock_grp, LCK_ATTR_NULL);
368*2c2f96dcSApple OSS Distributions
369*2c2f96dcSApple OSS Distributions (void) snprintf(lck_name, sizeof(lck_name), "%s.sync", cp->mc_name);
370*2c2f96dcSApple OSS Distributions cp->mc_sync_lock_grp = lck_grp_alloc_init(lck_name,
371*2c2f96dcSApple OSS Distributions LCK_GRP_ATTR_NULL);
372*2c2f96dcSApple OSS Distributions lck_mtx_init(&cp->mc_sync_lock, cp->mc_sync_lock_grp, LCK_ATTR_NULL);
373*2c2f96dcSApple OSS Distributions
374*2c2f96dcSApple OSS Distributions for (btp = mcache_bkttype; chunksize <= btp->bt_minbuf; btp++) {
375*2c2f96dcSApple OSS Distributions continue;
376*2c2f96dcSApple OSS Distributions }
377*2c2f96dcSApple OSS Distributions
378*2c2f96dcSApple OSS Distributions cp->cache_bkttype = btp;
379*2c2f96dcSApple OSS Distributions
380*2c2f96dcSApple OSS Distributions /*
381*2c2f96dcSApple OSS Distributions * Initialize the CPU layer. Each per-CPU structure is aligned
382*2c2f96dcSApple OSS Distributions * on the CPU cache line boundary to prevent false sharing.
383*2c2f96dcSApple OSS Distributions */
384*2c2f96dcSApple OSS Distributions for (c = 0; c < ncpu; c++) {
385*2c2f96dcSApple OSS Distributions mcache_cpu_t *ccp = &cp->mc_cpu[c];
386*2c2f96dcSApple OSS Distributions
387*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(ccp, CPU_CACHE_LINE_SIZE));
388*2c2f96dcSApple OSS Distributions lck_mtx_init(&ccp->cc_lock, cp->mc_cpu_lock_grp, LCK_ATTR_NULL);
389*2c2f96dcSApple OSS Distributions ccp->cc_objs = -1;
390*2c2f96dcSApple OSS Distributions ccp->cc_pobjs = -1;
391*2c2f96dcSApple OSS Distributions }
392*2c2f96dcSApple OSS Distributions
393*2c2f96dcSApple OSS Distributions if (mcache_ready) {
394*2c2f96dcSApple OSS Distributions mcache_cache_bkt_enable(cp);
395*2c2f96dcSApple OSS Distributions }
396*2c2f96dcSApple OSS Distributions
397*2c2f96dcSApple OSS Distributions /* TODO: dynamically create sysctl for stats */
398*2c2f96dcSApple OSS Distributions
399*2c2f96dcSApple OSS Distributions MCACHE_LIST_LOCK();
400*2c2f96dcSApple OSS Distributions LIST_INSERT_HEAD(&mcache_head, cp, mc_list);
401*2c2f96dcSApple OSS Distributions MCACHE_LIST_UNLOCK();
402*2c2f96dcSApple OSS Distributions
403*2c2f96dcSApple OSS Distributions /*
404*2c2f96dcSApple OSS Distributions * If cache buckets are enabled and this is the first cache
405*2c2f96dcSApple OSS Distributions * created, start the periodic cache update.
406*2c2f96dcSApple OSS Distributions */
407*2c2f96dcSApple OSS Distributions if (!(mcache_flags & MCF_NOCPUCACHE) && !mcache_updating) {
408*2c2f96dcSApple OSS Distributions mcache_updating = 1;
409*2c2f96dcSApple OSS Distributions mcache_update_timeout(NULL);
410*2c2f96dcSApple OSS Distributions }
411*2c2f96dcSApple OSS Distributions if (cp->mc_flags & MCF_DEBUG) {
412*2c2f96dcSApple OSS Distributions printf("mcache_create: %s (%s) arg %p bufsize %lu align %lu "
413*2c2f96dcSApple OSS Distributions "chunksize %lu bktsize %d\n", name, need_zone ? "i" : "e",
414*2c2f96dcSApple OSS Distributions arg, bufsize, cp->mc_align, chunksize, btp->bt_bktsize);
415*2c2f96dcSApple OSS Distributions }
416*2c2f96dcSApple OSS Distributions return cp;
417*2c2f96dcSApple OSS Distributions }
418*2c2f96dcSApple OSS Distributions
419*2c2f96dcSApple OSS Distributions /*
420*2c2f96dcSApple OSS Distributions * Allocate one or more objects from a cache.
421*2c2f96dcSApple OSS Distributions */
422*2c2f96dcSApple OSS Distributions __private_extern__ unsigned int
mcache_alloc_ext(mcache_t * cp,mcache_obj_t ** list,unsigned int num,int wait)423*2c2f96dcSApple OSS Distributions mcache_alloc_ext(mcache_t *cp, mcache_obj_t **list, unsigned int num, int wait)
424*2c2f96dcSApple OSS Distributions {
425*2c2f96dcSApple OSS Distributions mcache_cpu_t *ccp;
426*2c2f96dcSApple OSS Distributions mcache_obj_t **top = &(*list);
427*2c2f96dcSApple OSS Distributions mcache_bkt_t *bkt;
428*2c2f96dcSApple OSS Distributions unsigned int need = num;
429*2c2f96dcSApple OSS Distributions boolean_t nwretry = FALSE;
430*2c2f96dcSApple OSS Distributions
431*2c2f96dcSApple OSS Distributions /* MCR_NOSLEEP and MCR_FAILOK are mutually exclusive */
432*2c2f96dcSApple OSS Distributions VERIFY((wait & (MCR_NOSLEEP | MCR_FAILOK)) != (MCR_NOSLEEP | MCR_FAILOK));
433*2c2f96dcSApple OSS Distributions
434*2c2f96dcSApple OSS Distributions ASSERT(list != NULL);
435*2c2f96dcSApple OSS Distributions *list = NULL;
436*2c2f96dcSApple OSS Distributions
437*2c2f96dcSApple OSS Distributions if (num == 0) {
438*2c2f96dcSApple OSS Distributions return 0;
439*2c2f96dcSApple OSS Distributions }
440*2c2f96dcSApple OSS Distributions
441*2c2f96dcSApple OSS Distributions retry_alloc:
442*2c2f96dcSApple OSS Distributions /* We may not always be running in the same CPU in case of retries */
443*2c2f96dcSApple OSS Distributions ccp = MCACHE_CPU(cp);
444*2c2f96dcSApple OSS Distributions
445*2c2f96dcSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
446*2c2f96dcSApple OSS Distributions for (;;) {
447*2c2f96dcSApple OSS Distributions /*
448*2c2f96dcSApple OSS Distributions * If we have an object in the current CPU's filled bucket,
449*2c2f96dcSApple OSS Distributions * chain the object to any previous objects and return if
450*2c2f96dcSApple OSS Distributions * we've satisfied the number of requested objects.
451*2c2f96dcSApple OSS Distributions */
452*2c2f96dcSApple OSS Distributions if (ccp->cc_objs > 0) {
453*2c2f96dcSApple OSS Distributions mcache_obj_t *tail;
454*2c2f96dcSApple OSS Distributions int objs;
455*2c2f96dcSApple OSS Distributions
456*2c2f96dcSApple OSS Distributions /*
457*2c2f96dcSApple OSS Distributions * Objects in the bucket are already linked together
458*2c2f96dcSApple OSS Distributions * with the most recently freed object at the head of
459*2c2f96dcSApple OSS Distributions * the list; grab as many objects as we can.
460*2c2f96dcSApple OSS Distributions */
461*2c2f96dcSApple OSS Distributions objs = MIN((unsigned int)ccp->cc_objs, need);
462*2c2f96dcSApple OSS Distributions *list = ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
463*2c2f96dcSApple OSS Distributions ccp->cc_objs -= objs;
464*2c2f96dcSApple OSS Distributions ccp->cc_alloc += objs;
465*2c2f96dcSApple OSS Distributions
466*2c2f96dcSApple OSS Distributions tail = ccp->cc_filled->bkt_obj[ccp->cc_objs];
467*2c2f96dcSApple OSS Distributions list = &tail->obj_next;
468*2c2f96dcSApple OSS Distributions *list = NULL;
469*2c2f96dcSApple OSS Distributions
470*2c2f96dcSApple OSS Distributions /* If we got them all, return to caller */
471*2c2f96dcSApple OSS Distributions if ((need -= objs) == 0) {
472*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
473*2c2f96dcSApple OSS Distributions
474*2c2f96dcSApple OSS Distributions if (!(cp->mc_flags & MCF_NOLEAKLOG) &&
475*2c2f96dcSApple OSS Distributions cp->mc_slab_log != NULL) {
476*2c2f96dcSApple OSS Distributions (*cp->mc_slab_log)(num, *top, TRUE);
477*2c2f96dcSApple OSS Distributions }
478*2c2f96dcSApple OSS Distributions
479*2c2f96dcSApple OSS Distributions if (cp->mc_flags & MCF_DEBUG) {
480*2c2f96dcSApple OSS Distributions goto debug_alloc;
481*2c2f96dcSApple OSS Distributions }
482*2c2f96dcSApple OSS Distributions
483*2c2f96dcSApple OSS Distributions return num;
484*2c2f96dcSApple OSS Distributions }
485*2c2f96dcSApple OSS Distributions }
486*2c2f96dcSApple OSS Distributions
487*2c2f96dcSApple OSS Distributions /*
488*2c2f96dcSApple OSS Distributions * The CPU's filled bucket is empty. If the previous filled
489*2c2f96dcSApple OSS Distributions * bucket was full, exchange and try again.
490*2c2f96dcSApple OSS Distributions */
491*2c2f96dcSApple OSS Distributions if (ccp->cc_pobjs > 0) {
492*2c2f96dcSApple OSS Distributions mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
493*2c2f96dcSApple OSS Distributions continue;
494*2c2f96dcSApple OSS Distributions }
495*2c2f96dcSApple OSS Distributions
496*2c2f96dcSApple OSS Distributions /*
497*2c2f96dcSApple OSS Distributions * If the bucket layer is disabled, allocate from slab. This
498*2c2f96dcSApple OSS Distributions * can happen either because MCF_NOCPUCACHE is set, or because
499*2c2f96dcSApple OSS Distributions * the bucket layer is currently being resized.
500*2c2f96dcSApple OSS Distributions */
501*2c2f96dcSApple OSS Distributions if (ccp->cc_bktsize == 0) {
502*2c2f96dcSApple OSS Distributions break;
503*2c2f96dcSApple OSS Distributions }
504*2c2f96dcSApple OSS Distributions
505*2c2f96dcSApple OSS Distributions /*
506*2c2f96dcSApple OSS Distributions * Both of the CPU's buckets are empty; try to get full
507*2c2f96dcSApple OSS Distributions * bucket(s) from the bucket layer. Upon success, refill
508*2c2f96dcSApple OSS Distributions * this CPU and place any empty bucket into the empty list.
509*2c2f96dcSApple OSS Distributions * To prevent potential thrashing, replace both empty buckets
510*2c2f96dcSApple OSS Distributions * only if the requested count exceeds a bucket's worth of
511*2c2f96dcSApple OSS Distributions * objects.
512*2c2f96dcSApple OSS Distributions */
513*2c2f96dcSApple OSS Distributions (void) mcache_bkt_batch_alloc(cp, &cp->mc_full,
514*2c2f96dcSApple OSS Distributions &bkt, (need <= ccp->cc_bktsize) ? 1 : 2);
515*2c2f96dcSApple OSS Distributions if (bkt != NULL) {
516*2c2f96dcSApple OSS Distributions mcache_bkt_t *bkt_list = NULL;
517*2c2f96dcSApple OSS Distributions
518*2c2f96dcSApple OSS Distributions if (ccp->cc_pfilled != NULL) {
519*2c2f96dcSApple OSS Distributions ccp->cc_pfilled->bkt_next = bkt_list;
520*2c2f96dcSApple OSS Distributions bkt_list = ccp->cc_pfilled;
521*2c2f96dcSApple OSS Distributions }
522*2c2f96dcSApple OSS Distributions if (bkt->bkt_next == NULL) {
523*2c2f96dcSApple OSS Distributions /*
524*2c2f96dcSApple OSS Distributions * Bucket layer allocation returns only 1
525*2c2f96dcSApple OSS Distributions * magazine; retain current empty magazine.
526*2c2f96dcSApple OSS Distributions */
527*2c2f96dcSApple OSS Distributions mcache_cpu_refill(ccp, bkt, ccp->cc_bktsize);
528*2c2f96dcSApple OSS Distributions } else {
529*2c2f96dcSApple OSS Distributions /*
530*2c2f96dcSApple OSS Distributions * We got 2 full buckets from the bucket
531*2c2f96dcSApple OSS Distributions * layer; release the current empty bucket
532*2c2f96dcSApple OSS Distributions * back to the bucket layer.
533*2c2f96dcSApple OSS Distributions */
534*2c2f96dcSApple OSS Distributions if (ccp->cc_filled != NULL) {
535*2c2f96dcSApple OSS Distributions ccp->cc_filled->bkt_next = bkt_list;
536*2c2f96dcSApple OSS Distributions bkt_list = ccp->cc_filled;
537*2c2f96dcSApple OSS Distributions }
538*2c2f96dcSApple OSS Distributions mcache_cpu_batch_refill(ccp, bkt,
539*2c2f96dcSApple OSS Distributions ccp->cc_bktsize);
540*2c2f96dcSApple OSS Distributions }
541*2c2f96dcSApple OSS Distributions mcache_bkt_batch_free(cp, &cp->mc_empty, bkt_list);
542*2c2f96dcSApple OSS Distributions continue;
543*2c2f96dcSApple OSS Distributions }
544*2c2f96dcSApple OSS Distributions
545*2c2f96dcSApple OSS Distributions /*
546*2c2f96dcSApple OSS Distributions * The bucket layer has no full buckets; allocate the
547*2c2f96dcSApple OSS Distributions * object(s) directly from the slab layer.
548*2c2f96dcSApple OSS Distributions */
549*2c2f96dcSApple OSS Distributions break;
550*2c2f96dcSApple OSS Distributions }
551*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
552*2c2f96dcSApple OSS Distributions
553*2c2f96dcSApple OSS Distributions need -= (*cp->mc_slab_alloc)(cp->mc_private, &list, need, wait);
554*2c2f96dcSApple OSS Distributions
555*2c2f96dcSApple OSS Distributions /*
556*2c2f96dcSApple OSS Distributions * If this is a blocking allocation, or if it is non-blocking and
557*2c2f96dcSApple OSS Distributions * the cache's full bucket is non-empty, then retry the allocation.
558*2c2f96dcSApple OSS Distributions */
559*2c2f96dcSApple OSS Distributions if (need > 0) {
560*2c2f96dcSApple OSS Distributions if (!(wait & MCR_NONBLOCKING)) {
561*2c2f96dcSApple OSS Distributions os_atomic_inc(&cp->mc_wretry_cnt, relaxed);
562*2c2f96dcSApple OSS Distributions goto retry_alloc;
563*2c2f96dcSApple OSS Distributions } else if ((wait & (MCR_NOSLEEP | MCR_TRYHARD)) &&
564*2c2f96dcSApple OSS Distributions !mcache_bkt_isempty(cp)) {
565*2c2f96dcSApple OSS Distributions if (!nwretry) {
566*2c2f96dcSApple OSS Distributions nwretry = TRUE;
567*2c2f96dcSApple OSS Distributions }
568*2c2f96dcSApple OSS Distributions os_atomic_inc(&cp->mc_nwretry_cnt, relaxed);
569*2c2f96dcSApple OSS Distributions goto retry_alloc;
570*2c2f96dcSApple OSS Distributions } else if (nwretry) {
571*2c2f96dcSApple OSS Distributions os_atomic_inc(&cp->mc_nwfail_cnt, relaxed);
572*2c2f96dcSApple OSS Distributions }
573*2c2f96dcSApple OSS Distributions }
574*2c2f96dcSApple OSS Distributions
575*2c2f96dcSApple OSS Distributions if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) {
576*2c2f96dcSApple OSS Distributions (*cp->mc_slab_log)((num - need), *top, TRUE);
577*2c2f96dcSApple OSS Distributions }
578*2c2f96dcSApple OSS Distributions
579*2c2f96dcSApple OSS Distributions if (!(cp->mc_flags & MCF_DEBUG)) {
580*2c2f96dcSApple OSS Distributions return num - need;
581*2c2f96dcSApple OSS Distributions }
582*2c2f96dcSApple OSS Distributions
583*2c2f96dcSApple OSS Distributions debug_alloc:
584*2c2f96dcSApple OSS Distributions if (cp->mc_flags & MCF_DEBUG) {
585*2c2f96dcSApple OSS Distributions mcache_obj_t **o = top;
586*2c2f96dcSApple OSS Distributions unsigned int n;
587*2c2f96dcSApple OSS Distributions
588*2c2f96dcSApple OSS Distributions n = 0;
589*2c2f96dcSApple OSS Distributions /*
590*2c2f96dcSApple OSS Distributions * Verify that the chain of objects have the same count as
591*2c2f96dcSApple OSS Distributions * what we are about to report to the caller. Any mismatch
592*2c2f96dcSApple OSS Distributions * here means that the object list is insanely broken and
593*2c2f96dcSApple OSS Distributions * therefore we must panic.
594*2c2f96dcSApple OSS Distributions */
595*2c2f96dcSApple OSS Distributions while (*o != NULL) {
596*2c2f96dcSApple OSS Distributions o = &(*o)->obj_next;
597*2c2f96dcSApple OSS Distributions ++n;
598*2c2f96dcSApple OSS Distributions }
599*2c2f96dcSApple OSS Distributions if (n != (num - need)) {
600*2c2f96dcSApple OSS Distributions panic("mcache_alloc_ext: %s cp %p corrupted list "
601*2c2f96dcSApple OSS Distributions "(got %d actual %d)\n", cp->mc_name,
602*2c2f96dcSApple OSS Distributions (void *)cp, num - need, n);
603*2c2f96dcSApple OSS Distributions /* NOTREACHED */
604*2c2f96dcSApple OSS Distributions __builtin_unreachable();
605*2c2f96dcSApple OSS Distributions }
606*2c2f96dcSApple OSS Distributions }
607*2c2f96dcSApple OSS Distributions
608*2c2f96dcSApple OSS Distributions /* Invoke the slab layer audit callback if auditing is enabled */
609*2c2f96dcSApple OSS Distributions if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) {
610*2c2f96dcSApple OSS Distributions (*cp->mc_slab_audit)(cp->mc_private, *top, TRUE);
611*2c2f96dcSApple OSS Distributions }
612*2c2f96dcSApple OSS Distributions
613*2c2f96dcSApple OSS Distributions return num - need;
614*2c2f96dcSApple OSS Distributions }
615*2c2f96dcSApple OSS Distributions
616*2c2f96dcSApple OSS Distributions /*
617*2c2f96dcSApple OSS Distributions * Allocate a single object from a cache.
618*2c2f96dcSApple OSS Distributions */
619*2c2f96dcSApple OSS Distributions __private_extern__ void *
mcache_alloc(mcache_t * cp,int wait)620*2c2f96dcSApple OSS Distributions mcache_alloc(mcache_t *cp, int wait)
621*2c2f96dcSApple OSS Distributions {
622*2c2f96dcSApple OSS Distributions mcache_obj_t *buf;
623*2c2f96dcSApple OSS Distributions
624*2c2f96dcSApple OSS Distributions (void) mcache_alloc_ext(cp, &buf, 1, wait);
625*2c2f96dcSApple OSS Distributions return buf;
626*2c2f96dcSApple OSS Distributions }
627*2c2f96dcSApple OSS Distributions
__private_extern__ void
mcache_waiter_inc(mcache_t *cp)
{
	/*
	 * Record a thread waiting on this cache.  The free path checks
	 * mc_waiter_cnt and issues an MCN_RETRYALLOC notification when
	 * objects become available.
	 */
	os_atomic_inc(&cp->mc_waiter_cnt, relaxed);
}
633*2c2f96dcSApple OSS Distributions
__private_extern__ void
mcache_waiter_dec(mcache_t *cp)
{
	/* Balance a prior mcache_waiter_inc() once the wait is over. */
	os_atomic_dec(&cp->mc_waiter_cnt, relaxed);
}
639*2c2f96dcSApple OSS Distributions
__private_extern__ boolean_t
mcache_bkt_isempty(mcache_t *cp)
{
	/*
	 * This isn't meant to accurately tell whether there are
	 * any full buckets in the cache; it is simply a way to
	 * obtain "hints" about the state of the cache.
	 * The counter is read without taking any lock here, so the
	 * result may already be stale when the caller acts on it.
	 */
	return cp->mc_full.bl_total == 0;
}
650*2c2f96dcSApple OSS Distributions
651*2c2f96dcSApple OSS Distributions /*
652*2c2f96dcSApple OSS Distributions * Notify the slab layer about an event.
653*2c2f96dcSApple OSS Distributions */
654*2c2f96dcSApple OSS Distributions static void
mcache_notify(mcache_t * cp,u_int32_t event)655*2c2f96dcSApple OSS Distributions mcache_notify(mcache_t *cp, u_int32_t event)
656*2c2f96dcSApple OSS Distributions {
657*2c2f96dcSApple OSS Distributions if (cp->mc_slab_notify != NULL) {
658*2c2f96dcSApple OSS Distributions (*cp->mc_slab_notify)(cp->mc_private, event);
659*2c2f96dcSApple OSS Distributions }
660*2c2f96dcSApple OSS Distributions }
661*2c2f96dcSApple OSS Distributions
662*2c2f96dcSApple OSS Distributions /*
663*2c2f96dcSApple OSS Distributions * Purge the cache and disable its buckets.
664*2c2f96dcSApple OSS Distributions */
static void
mcache_purge(void *arg)
{
	mcache_t *cp = arg;

	/* Purge the bucket layer first (see mcache_bkt_purge()). */
	mcache_bkt_purge(cp);
	/*
	 * We cannot simply call mcache_cache_bkt_enable() from here as
	 * a bucket resize may be in flight and we would cause the CPU
	 * layers of the cache to point to different sizes. Therefore,
	 * we simply increment the enable count so that during the next
	 * periodic cache update the buckets can be reenabled.
	 */
	lck_mtx_lock_spin(&cp->mc_sync_lock);
	cp->mc_enable_cnt++;
	lck_mtx_unlock(&cp->mc_sync_lock);
}
682*2c2f96dcSApple OSS Distributions
683*2c2f96dcSApple OSS Distributions __private_extern__ boolean_t
mcache_purge_cache(mcache_t * cp,boolean_t async)684*2c2f96dcSApple OSS Distributions mcache_purge_cache(mcache_t *cp, boolean_t async)
685*2c2f96dcSApple OSS Distributions {
686*2c2f96dcSApple OSS Distributions /*
687*2c2f96dcSApple OSS Distributions * Purging a cache that has no per-CPU caches or is already
688*2c2f96dcSApple OSS Distributions * in the process of being purged is rather pointless.
689*2c2f96dcSApple OSS Distributions */
690*2c2f96dcSApple OSS Distributions if (cp->mc_flags & MCF_NOCPUCACHE) {
691*2c2f96dcSApple OSS Distributions return FALSE;
692*2c2f96dcSApple OSS Distributions }
693*2c2f96dcSApple OSS Distributions
694*2c2f96dcSApple OSS Distributions lck_mtx_lock_spin(&cp->mc_sync_lock);
695*2c2f96dcSApple OSS Distributions if (cp->mc_purge_cnt > 0) {
696*2c2f96dcSApple OSS Distributions lck_mtx_unlock(&cp->mc_sync_lock);
697*2c2f96dcSApple OSS Distributions return FALSE;
698*2c2f96dcSApple OSS Distributions }
699*2c2f96dcSApple OSS Distributions cp->mc_purge_cnt++;
700*2c2f96dcSApple OSS Distributions lck_mtx_unlock(&cp->mc_sync_lock);
701*2c2f96dcSApple OSS Distributions
702*2c2f96dcSApple OSS Distributions if (async) {
703*2c2f96dcSApple OSS Distributions mcache_dispatch(mcache_purge, cp);
704*2c2f96dcSApple OSS Distributions } else {
705*2c2f96dcSApple OSS Distributions mcache_purge(cp);
706*2c2f96dcSApple OSS Distributions }
707*2c2f96dcSApple OSS Distributions
708*2c2f96dcSApple OSS Distributions return TRUE;
709*2c2f96dcSApple OSS Distributions }
710*2c2f96dcSApple OSS Distributions
711*2c2f96dcSApple OSS Distributions /*
712*2c2f96dcSApple OSS Distributions * Free a single object to a cache.
713*2c2f96dcSApple OSS Distributions */
714*2c2f96dcSApple OSS Distributions __private_extern__ void
mcache_free(mcache_t * cp,void * buf)715*2c2f96dcSApple OSS Distributions mcache_free(mcache_t *cp, void *buf)
716*2c2f96dcSApple OSS Distributions {
717*2c2f96dcSApple OSS Distributions ((mcache_obj_t *)buf)->obj_next = NULL;
718*2c2f96dcSApple OSS Distributions mcache_free_ext(cp, (mcache_obj_t *)buf);
719*2c2f96dcSApple OSS Distributions }
720*2c2f96dcSApple OSS Distributions
721*2c2f96dcSApple OSS Distributions /*
722*2c2f96dcSApple OSS Distributions * Free one or more objects to a cache.
723*2c2f96dcSApple OSS Distributions */
__private_extern__ void
mcache_free_ext(mcache_t *cp, mcache_obj_t *list)
{
	mcache_cpu_t *ccp = MCACHE_CPU(cp);
	mcache_bkttype_t *btp;
	mcache_obj_t *nlist;
	mcache_bkt_t *bkt;

	/* Log the free transaction unless leak logging is suppressed */
	if (!(cp->mc_flags & MCF_NOLEAKLOG) && cp->mc_slab_log != NULL) {
		(*cp->mc_slab_log)(0, list, FALSE);
	}

	/* Invoke the slab layer audit callback if auditing is enabled */
	if ((cp->mc_flags & MCF_DEBUG) && cp->mc_slab_audit != NULL) {
		(*cp->mc_slab_audit)(cp->mc_private, list, FALSE);
	}

	MCACHE_LOCK(&ccp->cc_lock);
	for (;;) {
		/*
		 * If there is space in the current CPU's filled bucket, put
		 * the object there and return once all objects are freed.
		 * Note the cast to unsigned integer takes care of the case
		 * where the bucket layer is disabled (when cc_objs is -1).
		 */
		if ((unsigned int)ccp->cc_objs <
		    (unsigned int)ccp->cc_bktsize) {
			/*
			 * Reverse the list while we place the object into the
			 * bucket; this effectively causes the most recently
			 * freed object(s) to be reused during allocation.
			 */
			nlist = list->obj_next;
			list->obj_next = (ccp->cc_objs == 0) ? NULL :
			    ccp->cc_filled->bkt_obj[ccp->cc_objs - 1];
			ccp->cc_filled->bkt_obj[ccp->cc_objs++] = list;
			ccp->cc_free++;

			if ((list = nlist) != NULL) {
				continue;
			}

			/* We are done; return to caller */
			MCACHE_UNLOCK(&ccp->cc_lock);

			/* If there is a waiter below, notify it */
			if (cp->mc_waiter_cnt > 0) {
				mcache_notify(cp, MCN_RETRYALLOC);
			}
			return;
		}

		/*
		 * The CPU's filled bucket is full. If the previous filled
		 * bucket was empty, exchange and try again.
		 */
		if (ccp->cc_pobjs == 0) {
			mcache_cpu_refill(ccp, ccp->cc_pfilled, ccp->cc_pobjs);
			continue;
		}

		/*
		 * If the bucket layer is disabled, free to slab. This can
		 * happen either because MCF_NOCPUCACHE is set, or because
		 * the bucket layer is currently being resized.
		 */
		if (ccp->cc_bktsize == 0) {
			break;
		}

		/*
		 * Both of the CPU's buckets are full; try to get empty
		 * buckets from the bucket layer. Upon success, empty this
		 * CPU and place any full bucket into the full list.
		 *
		 * TODO: Because the caller currently doesn't indicate
		 * the number of objects in the list, we choose the more
		 * conservative approach of allocating only 1 empty
		 * bucket (to prevent potential thrashing). Once we
		 * have the object count, we can replace 1 with similar
		 * logic as used in mcache_alloc_ext().
		 */
		(void) mcache_bkt_batch_alloc(cp, &cp->mc_empty, &bkt, 1);
		if (bkt != NULL) {
			mcache_bkt_t *bkt_list = NULL;

			if (ccp->cc_pfilled != NULL) {
				ccp->cc_pfilled->bkt_next = bkt_list;
				bkt_list = ccp->cc_pfilled;
			}
			if (bkt->bkt_next == NULL) {
				/*
				 * Bucket layer allocation returns only 1
				 * bucket; retain current full bucket.
				 */
				mcache_cpu_refill(ccp, bkt, 0);
			} else {
				/*
				 * We got 2 empty buckets from the bucket
				 * layer; release the current full bucket
				 * back to the bucket layer.
				 */
				if (ccp->cc_filled != NULL) {
					ccp->cc_filled->bkt_next = bkt_list;
					bkt_list = ccp->cc_filled;
				}
				mcache_cpu_batch_refill(ccp, bkt, 0);
			}
			mcache_bkt_batch_free(cp, &cp->mc_full, bkt_list);
			continue;
		}
		btp = cp->cache_bkttype;

		/*
		 * We need an empty bucket to put our freed objects into
		 * but couldn't get an empty bucket from the bucket layer;
		 * attempt to allocate one. We do not want to block for
		 * allocation here, and if the bucket allocation fails
		 * we will simply fall through to the slab layer.
		 */
		MCACHE_UNLOCK(&ccp->cc_lock);
		bkt = mcache_alloc(btp->bt_cache, MCR_NOSLEEP);
		MCACHE_LOCK(&ccp->cc_lock);

		if (bkt != NULL) {
			/*
			 * We have an empty bucket, but since we drop the
			 * CPU lock above, the cache's bucket size may have
			 * changed. If so, free the bucket and try again.
			 */
			if (ccp->cc_bktsize != btp->bt_bktsize) {
				MCACHE_UNLOCK(&ccp->cc_lock);
				mcache_free(btp->bt_cache, bkt);
				MCACHE_LOCK(&ccp->cc_lock);
				continue;
			}

			/*
			 * Store it in the bucket object since we'll
			 * need to refer to it during bucket destroy;
			 * we can't safely refer to cache_bkttype as
			 * the bucket lock may not be acquired then.
			 */
			bkt->bkt_type = btp;

			/*
			 * We have an empty bucket of the right size;
			 * add it to the bucket layer and try again.
			 */
			ASSERT(bkt->bkt_next == NULL);
			mcache_bkt_batch_free(cp, &cp->mc_empty, bkt);
			continue;
		}

		/*
		 * The bucket layer has no empty buckets; free the
		 * object(s) directly to the slab layer.
		 */
		break;
	}
	MCACHE_UNLOCK(&ccp->cc_lock);

	/* If there is a waiter below, notify it */
	if (cp->mc_waiter_cnt > 0) {
		mcache_notify(cp, MCN_RETRYALLOC);
	}

	/* Advise the slab layer to purge the object(s) */
	(*cp->mc_slab_free)(cp->mc_private, list,
	    (cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
}
895*2c2f96dcSApple OSS Distributions
896*2c2f96dcSApple OSS Distributions /*
897*2c2f96dcSApple OSS Distributions * Cache destruction routine.
898*2c2f96dcSApple OSS Distributions */
__private_extern__ void
mcache_destroy(mcache_t *cp)
{
	void **pbuf;

	/* Unlink the cache from the global cache list */
	MCACHE_LIST_LOCK();
	LIST_REMOVE(cp, mc_list);
	MCACHE_LIST_UNLOCK();

	/* Purge the bucket layer before tearing the cache down */
	mcache_bkt_purge(cp);

	/*
	 * This cache is dead; there should be no further transaction.
	 * If it's still invoked, make sure that it induces a fault.
	 */
	cp->mc_slab_alloc = NULL;
	cp->mc_slab_free = NULL;
	cp->mc_slab_audit = NULL;

	/* Release the lock groups that back this cache's locks */
	lck_grp_free(cp->mc_bkt_lock_grp);
	lck_grp_free(cp->mc_cpu_lock_grp);
	lck_grp_free(cp->mc_sync_lock_grp);

	/*
	 * TODO: We need to destroy the zone here, but cannot do it
	 * because there is no such way to achieve that. Until then
	 * the memory allocated for the zone structure is leaked.
	 * Once it is achievable, uncomment these lines:
	 *
	 * if (cp->mc_slab_zone != NULL) {
	 *      zdestroy(cp->mc_slab_zone);
	 *      cp->mc_slab_zone = NULL;
	 * }
	 */

	/* Get the original address since we're about to free it */
	pbuf = (void **)((intptr_t)cp - sizeof(void *));

	zfree(mcache_zone, *pbuf);
}
939*2c2f96dcSApple OSS Distributions
940*2c2f96dcSApple OSS Distributions /*
941*2c2f96dcSApple OSS Distributions * Internal slab allocator used as a backend for simple caches. The current
942*2c2f96dcSApple OSS Distributions * implementation uses the zone allocator for simplicity reasons.
943*2c2f96dcSApple OSS Distributions */
944*2c2f96dcSApple OSS Distributions static unsigned int
mcache_slab_alloc(void * arg,mcache_obj_t *** plist,unsigned int num,int wait)945*2c2f96dcSApple OSS Distributions mcache_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num,
946*2c2f96dcSApple OSS Distributions int wait)
947*2c2f96dcSApple OSS Distributions {
948*2c2f96dcSApple OSS Distributions #pragma unused(wait)
949*2c2f96dcSApple OSS Distributions mcache_t *cp = arg;
950*2c2f96dcSApple OSS Distributions unsigned int need = num;
951*2c2f96dcSApple OSS Distributions size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
952*2c2f96dcSApple OSS Distributions u_int32_t flags = cp->mc_flags;
953*2c2f96dcSApple OSS Distributions void *buf, *base, **pbuf;
954*2c2f96dcSApple OSS Distributions mcache_obj_t **list = *plist;
955*2c2f96dcSApple OSS Distributions
956*2c2f96dcSApple OSS Distributions *list = NULL;
957*2c2f96dcSApple OSS Distributions
958*2c2f96dcSApple OSS Distributions for (;;) {
959*2c2f96dcSApple OSS Distributions buf = zalloc_flags(cp->mc_slab_zone, Z_WAITOK | Z_NOFAIL);
960*2c2f96dcSApple OSS Distributions
961*2c2f96dcSApple OSS Distributions /* Get the aligned base address for this object */
962*2c2f96dcSApple OSS Distributions base = (void *)P2ROUNDUP((intptr_t)buf + sizeof(u_int64_t),
963*2c2f96dcSApple OSS Distributions cp->mc_align);
964*2c2f96dcSApple OSS Distributions
965*2c2f96dcSApple OSS Distributions /*
966*2c2f96dcSApple OSS Distributions * Wind back a pointer size from the aligned base and
967*2c2f96dcSApple OSS Distributions * save the original address so we can free it later.
968*2c2f96dcSApple OSS Distributions */
969*2c2f96dcSApple OSS Distributions pbuf = (void **)((intptr_t)base - sizeof(void *));
970*2c2f96dcSApple OSS Distributions *pbuf = buf;
971*2c2f96dcSApple OSS Distributions
972*2c2f96dcSApple OSS Distributions VERIFY(((intptr_t)base + cp->mc_bufsize) <=
973*2c2f96dcSApple OSS Distributions ((intptr_t)buf + cp->mc_chunksize));
974*2c2f96dcSApple OSS Distributions
975*2c2f96dcSApple OSS Distributions /*
976*2c2f96dcSApple OSS Distributions * If auditing is enabled, patternize the contents of
977*2c2f96dcSApple OSS Distributions * the buffer starting from the 64-bit aligned base to
978*2c2f96dcSApple OSS Distributions * the end of the buffer; the length is rounded up to
979*2c2f96dcSApple OSS Distributions * the nearest 64-bit multiply; this is because we use
980*2c2f96dcSApple OSS Distributions * 64-bit memory access to set/check the pattern.
981*2c2f96dcSApple OSS Distributions */
982*2c2f96dcSApple OSS Distributions if (flags & MCF_DEBUG) {
983*2c2f96dcSApple OSS Distributions VERIFY(((intptr_t)base + rsize) <=
984*2c2f96dcSApple OSS Distributions ((intptr_t)buf + cp->mc_chunksize));
985*2c2f96dcSApple OSS Distributions mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
986*2c2f96dcSApple OSS Distributions }
987*2c2f96dcSApple OSS Distributions
988*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(base, cp->mc_align));
989*2c2f96dcSApple OSS Distributions *list = (mcache_obj_t *)base;
990*2c2f96dcSApple OSS Distributions
991*2c2f96dcSApple OSS Distributions (*list)->obj_next = NULL;
992*2c2f96dcSApple OSS Distributions list = *plist = &(*list)->obj_next;
993*2c2f96dcSApple OSS Distributions
994*2c2f96dcSApple OSS Distributions /* If we got them all, return to mcache */
995*2c2f96dcSApple OSS Distributions if (--need == 0) {
996*2c2f96dcSApple OSS Distributions break;
997*2c2f96dcSApple OSS Distributions }
998*2c2f96dcSApple OSS Distributions }
999*2c2f96dcSApple OSS Distributions
1000*2c2f96dcSApple OSS Distributions return num - need;
1001*2c2f96dcSApple OSS Distributions }
1002*2c2f96dcSApple OSS Distributions
1003*2c2f96dcSApple OSS Distributions /*
1004*2c2f96dcSApple OSS Distributions * Internal slab deallocator used as a backend for simple caches.
1005*2c2f96dcSApple OSS Distributions */
1006*2c2f96dcSApple OSS Distributions static void
mcache_slab_free(void * arg,mcache_obj_t * list,__unused boolean_t purged)1007*2c2f96dcSApple OSS Distributions mcache_slab_free(void *arg, mcache_obj_t *list, __unused boolean_t purged)
1008*2c2f96dcSApple OSS Distributions {
1009*2c2f96dcSApple OSS Distributions mcache_t *cp = arg;
1010*2c2f96dcSApple OSS Distributions mcache_obj_t *nlist;
1011*2c2f96dcSApple OSS Distributions size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
1012*2c2f96dcSApple OSS Distributions u_int32_t flags = cp->mc_flags;
1013*2c2f96dcSApple OSS Distributions void *base;
1014*2c2f96dcSApple OSS Distributions void **pbuf;
1015*2c2f96dcSApple OSS Distributions
1016*2c2f96dcSApple OSS Distributions for (;;) {
1017*2c2f96dcSApple OSS Distributions nlist = list->obj_next;
1018*2c2f96dcSApple OSS Distributions list->obj_next = NULL;
1019*2c2f96dcSApple OSS Distributions
1020*2c2f96dcSApple OSS Distributions base = list;
1021*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(base, cp->mc_align));
1022*2c2f96dcSApple OSS Distributions
1023*2c2f96dcSApple OSS Distributions /* Get the original address since we're about to free it */
1024*2c2f96dcSApple OSS Distributions pbuf = (void **)((intptr_t)base - sizeof(void *));
1025*2c2f96dcSApple OSS Distributions
1026*2c2f96dcSApple OSS Distributions VERIFY(((intptr_t)base + cp->mc_bufsize) <=
1027*2c2f96dcSApple OSS Distributions ((intptr_t)*pbuf + cp->mc_chunksize));
1028*2c2f96dcSApple OSS Distributions
1029*2c2f96dcSApple OSS Distributions if (flags & MCF_DEBUG) {
1030*2c2f96dcSApple OSS Distributions VERIFY(((intptr_t)base + rsize) <=
1031*2c2f96dcSApple OSS Distributions ((intptr_t)*pbuf + cp->mc_chunksize));
1032*2c2f96dcSApple OSS Distributions mcache_audit_free_verify(NULL, base, 0, rsize);
1033*2c2f96dcSApple OSS Distributions }
1034*2c2f96dcSApple OSS Distributions
1035*2c2f96dcSApple OSS Distributions /* Free it to zone */
1036*2c2f96dcSApple OSS Distributions zfree(cp->mc_slab_zone, *pbuf);
1037*2c2f96dcSApple OSS Distributions
1038*2c2f96dcSApple OSS Distributions /* No more objects to free; return to mcache */
1039*2c2f96dcSApple OSS Distributions if ((list = nlist) == NULL) {
1040*2c2f96dcSApple OSS Distributions break;
1041*2c2f96dcSApple OSS Distributions }
1042*2c2f96dcSApple OSS Distributions }
1043*2c2f96dcSApple OSS Distributions }
1044*2c2f96dcSApple OSS Distributions
/*
 * Internal slab auditor for simple caches.
 *
 * On free (alloc == FALSE) each object is filled with the free
 * pattern; on allocation (alloc == TRUE) the pattern is verified
 * intact and then cleared.
 */
static void
mcache_slab_audit(void *arg, mcache_obj_t *list, boolean_t alloc)
{
	mcache_t *cp = arg;
	/* Pattern span: buffer size rounded up to a 64-bit multiple */
	size_t rsize = P2ROUNDUP(cp->mc_bufsize, sizeof(u_int64_t));
	void *base, **pbuf;

	while (list != NULL) {
		mcache_obj_t *next = list->obj_next;

		base = list;
		VERIFY(IS_P2ALIGNED(base, cp->mc_align));

		/* Get the original address */
		pbuf = (void **)((intptr_t)base - sizeof(void *));

		VERIFY(((intptr_t)base + rsize) <=
		    ((intptr_t)*pbuf + cp->mc_chunksize));

		if (!alloc) {
			mcache_set_pattern(MCACHE_FREE_PATTERN, base, rsize);
		} else {
			mcache_audit_free_verify_set(NULL, base, 0, rsize);
		}

		/*
		 * Patternizing above overwrites the obj_next field (it
		 * lives inside the buffer), so restore the linkage while
		 * advancing to the next object.  Do not simplify this to
		 * a plain "list = next".
		 */
		list = list->obj_next = next;
	}
}
1076*2c2f96dcSApple OSS Distributions
1077*2c2f96dcSApple OSS Distributions /*
1078*2c2f96dcSApple OSS Distributions * Refill the CPU's buckets with bkt and its follower (if any).
1079*2c2f96dcSApple OSS Distributions */
1080*2c2f96dcSApple OSS Distributions static void
mcache_cpu_batch_refill(mcache_cpu_t * ccp,mcache_bkt_t * bkt,int objs)1081*2c2f96dcSApple OSS Distributions mcache_cpu_batch_refill(mcache_cpu_t *ccp, mcache_bkt_t *bkt, int objs)
1082*2c2f96dcSApple OSS Distributions {
1083*2c2f96dcSApple OSS Distributions ASSERT((ccp->cc_filled == NULL && ccp->cc_objs == -1) ||
1084*2c2f96dcSApple OSS Distributions (ccp->cc_filled && ccp->cc_objs + objs == ccp->cc_bktsize));
1085*2c2f96dcSApple OSS Distributions ASSERT(ccp->cc_bktsize > 0);
1086*2c2f96dcSApple OSS Distributions
1087*2c2f96dcSApple OSS Distributions ccp->cc_filled = bkt;
1088*2c2f96dcSApple OSS Distributions ccp->cc_objs = objs;
1089*2c2f96dcSApple OSS Distributions if (__probable(bkt->bkt_next != NULL)) {
1090*2c2f96dcSApple OSS Distributions ccp->cc_pfilled = bkt->bkt_next;
1091*2c2f96dcSApple OSS Distributions ccp->cc_pobjs = objs;
1092*2c2f96dcSApple OSS Distributions bkt->bkt_next = NULL;
1093*2c2f96dcSApple OSS Distributions } else {
1094*2c2f96dcSApple OSS Distributions ASSERT(bkt->bkt_next == NULL);
1095*2c2f96dcSApple OSS Distributions ccp->cc_pfilled = NULL;
1096*2c2f96dcSApple OSS Distributions ccp->cc_pobjs = -1;
1097*2c2f96dcSApple OSS Distributions }
1098*2c2f96dcSApple OSS Distributions }
1099*2c2f96dcSApple OSS Distributions
/*
 * Refill the CPU's filled bucket with bkt and save the previous one.
 */
static void
mcache_cpu_refill(mcache_cpu_t *ccp, mcache_bkt_t *bkt, int objs)
{
	/*
	 * The CPU layer must either be empty (no filled bucket) or the
	 * incoming bucket must exactly complement the current one, i.e.
	 * the combined object count equals the bucket size.
	 */
	ASSERT((ccp->cc_filled == NULL && ccp->cc_objs == -1) ||
	    (ccp->cc_filled && ccp->cc_objs + objs == ccp->cc_bktsize));
	ASSERT(ccp->cc_bktsize > 0);

	/* Demote the current bucket to the previous slot, install bkt */
	ccp->cc_pfilled = ccp->cc_filled;
	ccp->cc_pobjs = ccp->cc_objs;
	ccp->cc_filled = bkt;
	ccp->cc_objs = objs;
}
1115*2c2f96dcSApple OSS Distributions
1116*2c2f96dcSApple OSS Distributions /*
1117*2c2f96dcSApple OSS Distributions * Get one or more buckets from the bucket layer.
1118*2c2f96dcSApple OSS Distributions */
1119*2c2f96dcSApple OSS Distributions static uint32_t
mcache_bkt_batch_alloc(mcache_t * cp,mcache_bktlist_t * blp,mcache_bkt_t ** list,uint32_t num)1120*2c2f96dcSApple OSS Distributions mcache_bkt_batch_alloc(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkt_t **list,
1121*2c2f96dcSApple OSS Distributions uint32_t num)
1122*2c2f96dcSApple OSS Distributions {
1123*2c2f96dcSApple OSS Distributions mcache_bkt_t *bkt_list = NULL;
1124*2c2f96dcSApple OSS Distributions mcache_bkt_t *bkt;
1125*2c2f96dcSApple OSS Distributions uint32_t need = num;
1126*2c2f96dcSApple OSS Distributions
1127*2c2f96dcSApple OSS Distributions ASSERT(list != NULL && need > 0);
1128*2c2f96dcSApple OSS Distributions
1129*2c2f96dcSApple OSS Distributions if (!MCACHE_LOCK_TRY(&cp->mc_bkt_lock)) {
1130*2c2f96dcSApple OSS Distributions /*
1131*2c2f96dcSApple OSS Distributions * The bucket layer lock is held by another CPU; increase
1132*2c2f96dcSApple OSS Distributions * the contention count so that we can later resize the
1133*2c2f96dcSApple OSS Distributions * bucket size accordingly.
1134*2c2f96dcSApple OSS Distributions */
1135*2c2f96dcSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1136*2c2f96dcSApple OSS Distributions cp->mc_bkt_contention++;
1137*2c2f96dcSApple OSS Distributions }
1138*2c2f96dcSApple OSS Distributions
1139*2c2f96dcSApple OSS Distributions while ((bkt = blp->bl_list) != NULL) {
1140*2c2f96dcSApple OSS Distributions blp->bl_list = bkt->bkt_next;
1141*2c2f96dcSApple OSS Distributions bkt->bkt_next = bkt_list;
1142*2c2f96dcSApple OSS Distributions bkt_list = bkt;
1143*2c2f96dcSApple OSS Distributions if (--blp->bl_total < blp->bl_min) {
1144*2c2f96dcSApple OSS Distributions blp->bl_min = blp->bl_total;
1145*2c2f96dcSApple OSS Distributions }
1146*2c2f96dcSApple OSS Distributions blp->bl_alloc++;
1147*2c2f96dcSApple OSS Distributions if (--need == 0) {
1148*2c2f96dcSApple OSS Distributions break;
1149*2c2f96dcSApple OSS Distributions }
1150*2c2f96dcSApple OSS Distributions }
1151*2c2f96dcSApple OSS Distributions
1152*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1153*2c2f96dcSApple OSS Distributions
1154*2c2f96dcSApple OSS Distributions *list = bkt_list;
1155*2c2f96dcSApple OSS Distributions
1156*2c2f96dcSApple OSS Distributions return num - need;
1157*2c2f96dcSApple OSS Distributions }
1158*2c2f96dcSApple OSS Distributions
1159*2c2f96dcSApple OSS Distributions /*
1160*2c2f96dcSApple OSS Distributions * Return one or more buckets to the bucket layer.
1161*2c2f96dcSApple OSS Distributions */
1162*2c2f96dcSApple OSS Distributions static void
mcache_bkt_batch_free(mcache_t * cp,mcache_bktlist_t * blp,mcache_bkt_t * bkt)1163*2c2f96dcSApple OSS Distributions mcache_bkt_batch_free(mcache_t *cp, mcache_bktlist_t *blp, mcache_bkt_t *bkt)
1164*2c2f96dcSApple OSS Distributions {
1165*2c2f96dcSApple OSS Distributions mcache_bkt_t *nbkt;
1166*2c2f96dcSApple OSS Distributions
1167*2c2f96dcSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1168*2c2f96dcSApple OSS Distributions while (bkt != NULL) {
1169*2c2f96dcSApple OSS Distributions nbkt = bkt->bkt_next;
1170*2c2f96dcSApple OSS Distributions bkt->bkt_next = blp->bl_list;
1171*2c2f96dcSApple OSS Distributions blp->bl_list = bkt;
1172*2c2f96dcSApple OSS Distributions blp->bl_total++;
1173*2c2f96dcSApple OSS Distributions bkt = nbkt;
1174*2c2f96dcSApple OSS Distributions }
1175*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1176*2c2f96dcSApple OSS Distributions }
1177*2c2f96dcSApple OSS Distributions
1178*2c2f96dcSApple OSS Distributions /*
1179*2c2f96dcSApple OSS Distributions * Enable the bucket layer of a cache.
1180*2c2f96dcSApple OSS Distributions */
1181*2c2f96dcSApple OSS Distributions static void
mcache_cache_bkt_enable(mcache_t * cp)1182*2c2f96dcSApple OSS Distributions mcache_cache_bkt_enable(mcache_t *cp)
1183*2c2f96dcSApple OSS Distributions {
1184*2c2f96dcSApple OSS Distributions mcache_cpu_t *ccp;
1185*2c2f96dcSApple OSS Distributions unsigned int cpu;
1186*2c2f96dcSApple OSS Distributions
1187*2c2f96dcSApple OSS Distributions if (cp->mc_flags & MCF_NOCPUCACHE) {
1188*2c2f96dcSApple OSS Distributions return;
1189*2c2f96dcSApple OSS Distributions }
1190*2c2f96dcSApple OSS Distributions
1191*2c2f96dcSApple OSS Distributions for (cpu = 0; cpu < ncpu; cpu++) {
1192*2c2f96dcSApple OSS Distributions ccp = &cp->mc_cpu[cpu];
1193*2c2f96dcSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
1194*2c2f96dcSApple OSS Distributions ccp->cc_bktsize = cp->cache_bkttype->bt_bktsize;
1195*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
1196*2c2f96dcSApple OSS Distributions }
1197*2c2f96dcSApple OSS Distributions }
1198*2c2f96dcSApple OSS Distributions
1199*2c2f96dcSApple OSS Distributions /*
1200*2c2f96dcSApple OSS Distributions * Purge all buckets from a cache and disable its bucket layer.
1201*2c2f96dcSApple OSS Distributions */
1202*2c2f96dcSApple OSS Distributions static void
mcache_bkt_purge(mcache_t * cp)1203*2c2f96dcSApple OSS Distributions mcache_bkt_purge(mcache_t *cp)
1204*2c2f96dcSApple OSS Distributions {
1205*2c2f96dcSApple OSS Distributions mcache_cpu_t *ccp;
1206*2c2f96dcSApple OSS Distributions mcache_bkt_t *bp, *pbp;
1207*2c2f96dcSApple OSS Distributions int objs, pobjs;
1208*2c2f96dcSApple OSS Distributions unsigned int cpu;
1209*2c2f96dcSApple OSS Distributions
1210*2c2f96dcSApple OSS Distributions for (cpu = 0; cpu < ncpu; cpu++) {
1211*2c2f96dcSApple OSS Distributions ccp = &cp->mc_cpu[cpu];
1212*2c2f96dcSApple OSS Distributions
1213*2c2f96dcSApple OSS Distributions MCACHE_LOCK(&ccp->cc_lock);
1214*2c2f96dcSApple OSS Distributions
1215*2c2f96dcSApple OSS Distributions bp = ccp->cc_filled;
1216*2c2f96dcSApple OSS Distributions pbp = ccp->cc_pfilled;
1217*2c2f96dcSApple OSS Distributions objs = ccp->cc_objs;
1218*2c2f96dcSApple OSS Distributions pobjs = ccp->cc_pobjs;
1219*2c2f96dcSApple OSS Distributions ccp->cc_filled = NULL;
1220*2c2f96dcSApple OSS Distributions ccp->cc_pfilled = NULL;
1221*2c2f96dcSApple OSS Distributions ccp->cc_objs = -1;
1222*2c2f96dcSApple OSS Distributions ccp->cc_pobjs = -1;
1223*2c2f96dcSApple OSS Distributions ccp->cc_bktsize = 0;
1224*2c2f96dcSApple OSS Distributions
1225*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&ccp->cc_lock);
1226*2c2f96dcSApple OSS Distributions
1227*2c2f96dcSApple OSS Distributions if (bp != NULL) {
1228*2c2f96dcSApple OSS Distributions mcache_bkt_destroy(cp, bp, objs);
1229*2c2f96dcSApple OSS Distributions }
1230*2c2f96dcSApple OSS Distributions if (pbp != NULL) {
1231*2c2f96dcSApple OSS Distributions mcache_bkt_destroy(cp, pbp, pobjs);
1232*2c2f96dcSApple OSS Distributions }
1233*2c2f96dcSApple OSS Distributions }
1234*2c2f96dcSApple OSS Distributions
1235*2c2f96dcSApple OSS Distributions mcache_bkt_ws_zero(cp);
1236*2c2f96dcSApple OSS Distributions mcache_bkt_ws_reap(cp);
1237*2c2f96dcSApple OSS Distributions }
1238*2c2f96dcSApple OSS Distributions
1239*2c2f96dcSApple OSS Distributions /*
1240*2c2f96dcSApple OSS Distributions * Free one or more objects in the bucket to the slab layer,
1241*2c2f96dcSApple OSS Distributions * and also free the bucket itself.
1242*2c2f96dcSApple OSS Distributions */
1243*2c2f96dcSApple OSS Distributions static void
mcache_bkt_destroy(mcache_t * cp,mcache_bkt_t * bkt,int nobjs)1244*2c2f96dcSApple OSS Distributions mcache_bkt_destroy(mcache_t *cp, mcache_bkt_t *bkt, int nobjs)
1245*2c2f96dcSApple OSS Distributions {
1246*2c2f96dcSApple OSS Distributions if (nobjs > 0) {
1247*2c2f96dcSApple OSS Distributions mcache_obj_t *top = bkt->bkt_obj[nobjs - 1];
1248*2c2f96dcSApple OSS Distributions
1249*2c2f96dcSApple OSS Distributions if (cp->mc_flags & MCF_DEBUG) {
1250*2c2f96dcSApple OSS Distributions mcache_obj_t *o = top;
1251*2c2f96dcSApple OSS Distributions int cnt = 0;
1252*2c2f96dcSApple OSS Distributions
1253*2c2f96dcSApple OSS Distributions /*
1254*2c2f96dcSApple OSS Distributions * Verify that the chain of objects in the bucket is
1255*2c2f96dcSApple OSS Distributions * valid. Any mismatch here means a mistake when the
1256*2c2f96dcSApple OSS Distributions * object(s) were freed to the CPU layer, so we panic.
1257*2c2f96dcSApple OSS Distributions */
1258*2c2f96dcSApple OSS Distributions while (o != NULL) {
1259*2c2f96dcSApple OSS Distributions o = o->obj_next;
1260*2c2f96dcSApple OSS Distributions ++cnt;
1261*2c2f96dcSApple OSS Distributions }
1262*2c2f96dcSApple OSS Distributions if (cnt != nobjs) {
1263*2c2f96dcSApple OSS Distributions panic("mcache_bkt_destroy: %s cp %p corrupted "
1264*2c2f96dcSApple OSS Distributions "list in bkt %p (nobjs %d actual %d)\n",
1265*2c2f96dcSApple OSS Distributions cp->mc_name, (void *)cp, (void *)bkt,
1266*2c2f96dcSApple OSS Distributions nobjs, cnt);
1267*2c2f96dcSApple OSS Distributions /* NOTREACHED */
1268*2c2f96dcSApple OSS Distributions __builtin_unreachable();
1269*2c2f96dcSApple OSS Distributions }
1270*2c2f96dcSApple OSS Distributions }
1271*2c2f96dcSApple OSS Distributions
1272*2c2f96dcSApple OSS Distributions /* Advise the slab layer to purge the object(s) */
1273*2c2f96dcSApple OSS Distributions (*cp->mc_slab_free)(cp->mc_private, top,
1274*2c2f96dcSApple OSS Distributions (cp->mc_flags & MCF_DEBUG) || cp->mc_purge_cnt);
1275*2c2f96dcSApple OSS Distributions }
1276*2c2f96dcSApple OSS Distributions mcache_free(bkt->bkt_type->bt_cache, bkt);
1277*2c2f96dcSApple OSS Distributions }
1278*2c2f96dcSApple OSS Distributions
1279*2c2f96dcSApple OSS Distributions /*
1280*2c2f96dcSApple OSS Distributions * Update the bucket layer working set statistics.
1281*2c2f96dcSApple OSS Distributions */
1282*2c2f96dcSApple OSS Distributions static void
mcache_bkt_ws_update(mcache_t * cp)1283*2c2f96dcSApple OSS Distributions mcache_bkt_ws_update(mcache_t *cp)
1284*2c2f96dcSApple OSS Distributions {
1285*2c2f96dcSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1286*2c2f96dcSApple OSS Distributions
1287*2c2f96dcSApple OSS Distributions cp->mc_full.bl_reaplimit = cp->mc_full.bl_min;
1288*2c2f96dcSApple OSS Distributions cp->mc_full.bl_min = cp->mc_full.bl_total;
1289*2c2f96dcSApple OSS Distributions cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_min;
1290*2c2f96dcSApple OSS Distributions cp->mc_empty.bl_min = cp->mc_empty.bl_total;
1291*2c2f96dcSApple OSS Distributions
1292*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1293*2c2f96dcSApple OSS Distributions }
1294*2c2f96dcSApple OSS Distributions
1295*2c2f96dcSApple OSS Distributions /*
1296*2c2f96dcSApple OSS Distributions * Mark everything as eligible for reaping (working set is zero).
1297*2c2f96dcSApple OSS Distributions */
1298*2c2f96dcSApple OSS Distributions static void
mcache_bkt_ws_zero(mcache_t * cp)1299*2c2f96dcSApple OSS Distributions mcache_bkt_ws_zero(mcache_t *cp)
1300*2c2f96dcSApple OSS Distributions {
1301*2c2f96dcSApple OSS Distributions MCACHE_LOCK(&cp->mc_bkt_lock);
1302*2c2f96dcSApple OSS Distributions
1303*2c2f96dcSApple OSS Distributions cp->mc_full.bl_reaplimit = cp->mc_full.bl_total;
1304*2c2f96dcSApple OSS Distributions cp->mc_full.bl_min = cp->mc_full.bl_total;
1305*2c2f96dcSApple OSS Distributions cp->mc_empty.bl_reaplimit = cp->mc_empty.bl_total;
1306*2c2f96dcSApple OSS Distributions cp->mc_empty.bl_min = cp->mc_empty.bl_total;
1307*2c2f96dcSApple OSS Distributions
1308*2c2f96dcSApple OSS Distributions MCACHE_UNLOCK(&cp->mc_bkt_lock);
1309*2c2f96dcSApple OSS Distributions }
1310*2c2f96dcSApple OSS Distributions
1311*2c2f96dcSApple OSS Distributions /*
1312*2c2f96dcSApple OSS Distributions * Reap all buckets that are beyond the working set.
1313*2c2f96dcSApple OSS Distributions */
1314*2c2f96dcSApple OSS Distributions static void
mcache_bkt_ws_reap(mcache_t * cp)1315*2c2f96dcSApple OSS Distributions mcache_bkt_ws_reap(mcache_t *cp)
1316*2c2f96dcSApple OSS Distributions {
1317*2c2f96dcSApple OSS Distributions mcache_bkt_t *bkt, *nbkt;
1318*2c2f96dcSApple OSS Distributions uint32_t reap;
1319*2c2f96dcSApple OSS Distributions
1320*2c2f96dcSApple OSS Distributions reap = MIN(cp->mc_full.bl_reaplimit, cp->mc_full.bl_min);
1321*2c2f96dcSApple OSS Distributions if (reap != 0) {
1322*2c2f96dcSApple OSS Distributions (void) mcache_bkt_batch_alloc(cp, &cp->mc_full, &bkt, reap);
1323*2c2f96dcSApple OSS Distributions while (bkt != NULL) {
1324*2c2f96dcSApple OSS Distributions nbkt = bkt->bkt_next;
1325*2c2f96dcSApple OSS Distributions bkt->bkt_next = NULL;
1326*2c2f96dcSApple OSS Distributions mcache_bkt_destroy(cp, bkt, bkt->bkt_type->bt_bktsize);
1327*2c2f96dcSApple OSS Distributions bkt = nbkt;
1328*2c2f96dcSApple OSS Distributions }
1329*2c2f96dcSApple OSS Distributions }
1330*2c2f96dcSApple OSS Distributions
1331*2c2f96dcSApple OSS Distributions reap = MIN(cp->mc_empty.bl_reaplimit, cp->mc_empty.bl_min);
1332*2c2f96dcSApple OSS Distributions if (reap != 0) {
1333*2c2f96dcSApple OSS Distributions (void) mcache_bkt_batch_alloc(cp, &cp->mc_empty, &bkt, reap);
1334*2c2f96dcSApple OSS Distributions while (bkt != NULL) {
1335*2c2f96dcSApple OSS Distributions nbkt = bkt->bkt_next;
1336*2c2f96dcSApple OSS Distributions bkt->bkt_next = NULL;
1337*2c2f96dcSApple OSS Distributions mcache_bkt_destroy(cp, bkt, 0);
1338*2c2f96dcSApple OSS Distributions bkt = nbkt;
1339*2c2f96dcSApple OSS Distributions }
1340*2c2f96dcSApple OSS Distributions }
1341*2c2f96dcSApple OSS Distributions }
1342*2c2f96dcSApple OSS Distributions
/*
 * Delayed thread-call callback: the reap hold-off interval has
 * elapsed, so clear the flag and allow a new reap to be scheduled
 * by mcache_reap().
 */
static void
mcache_reap_timeout(thread_call_param_t dummy __unused,
    thread_call_param_t arg)
{
	volatile UInt32 *flag = arg;

	ASSERT(flag == &mcache_reaping);

	/* Re-open the gate claimed by OSCompareAndSwap in mcache_reap() */
	*flag = 0;
}
1353*2c2f96dcSApple OSS Distributions
1354*2c2f96dcSApple OSS Distributions static void
mcache_reap_done(void * flag)1355*2c2f96dcSApple OSS Distributions mcache_reap_done(void *flag)
1356*2c2f96dcSApple OSS Distributions {
1357*2c2f96dcSApple OSS Distributions uint64_t deadline, leeway;
1358*2c2f96dcSApple OSS Distributions
1359*2c2f96dcSApple OSS Distributions clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
1360*2c2f96dcSApple OSS Distributions &deadline);
1361*2c2f96dcSApple OSS Distributions clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
1362*2c2f96dcSApple OSS Distributions NSEC_PER_SEC, &leeway);
1363*2c2f96dcSApple OSS Distributions thread_call_enter_delayed_with_leeway(mcache_reap_tcall, flag,
1364*2c2f96dcSApple OSS Distributions deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
1365*2c2f96dcSApple OSS Distributions }
1366*2c2f96dcSApple OSS Distributions
/*
 * Dispatched worker for mcache_reap(): reap every registered cache,
 * then schedule the delayed reset of the reaping flag.
 */
static void
mcache_reap_start(void *arg)
{
	UInt32 *flag = arg;

	ASSERT(flag == &mcache_reaping);

	mcache_applyall(mcache_cache_reap);
	mcache_dispatch(mcache_reap_done, flag);
}
1377*2c2f96dcSApple OSS Distributions
/*
 * Request an asynchronous reap of all caches.  Safe to call from
 * any context; at most one reap is in flight at a time.
 */
__private_extern__ void
mcache_reap(void)
{
	UInt32 *flag = &mcache_reaping;

	/*
	 * Skip if this thread already holds the global cache-list lock
	 * (the reap would deadlock against it), or if a reap is already
	 * pending: the compare-and-swap atomically claims the flag, so
	 * only one caller dispatches until mcache_reap_timeout() clears
	 * it after the hold-off interval.
	 */
	if (mcache_llock_owner == current_thread() ||
	    !OSCompareAndSwap(0, 1, flag)) {
		return;
	}

	mcache_dispatch(mcache_reap_start, flag);
}
1390*2c2f96dcSApple OSS Distributions
1391*2c2f96dcSApple OSS Distributions __private_extern__ void
mcache_reap_now(mcache_t * cp,boolean_t purge)1392*2c2f96dcSApple OSS Distributions mcache_reap_now(mcache_t *cp, boolean_t purge)
1393*2c2f96dcSApple OSS Distributions {
1394*2c2f96dcSApple OSS Distributions if (purge) {
1395*2c2f96dcSApple OSS Distributions mcache_bkt_purge(cp);
1396*2c2f96dcSApple OSS Distributions mcache_cache_bkt_enable(cp);
1397*2c2f96dcSApple OSS Distributions } else {
1398*2c2f96dcSApple OSS Distributions mcache_bkt_ws_zero(cp);
1399*2c2f96dcSApple OSS Distributions mcache_bkt_ws_reap(cp);
1400*2c2f96dcSApple OSS Distributions }
1401*2c2f96dcSApple OSS Distributions }
1402*2c2f96dcSApple OSS Distributions
/*
 * Per-cache callback used by the global reap: release buckets that
 * are beyond the cache's current working set.
 */
static void
mcache_cache_reap(mcache_t *cp)
{
	mcache_bkt_ws_reap(cp);
}
1408*2c2f96dcSApple OSS Distributions
/*
 * Performs period maintenance on a cache.  Called for every cache
 * from the mcache_update() timer with the global list lock held;
 * decides whether the cache needs a bucket resize or a post-purge
 * reenable and dispatches that work asynchronously.
 */
static void
mcache_cache_update(mcache_t *cp)
{
	int need_bkt_resize = 0;
	int need_bkt_reenable = 0;

	lck_mtx_assert(&mcache_llock, LCK_MTX_ASSERT_OWNED);

	/* Roll the working-set window forward for this interval */
	mcache_bkt_ws_update(cp);

	/*
	 * Cache resize and post-purge reenable are mutually exclusive.
	 * If the cache was previously purged, there is no point of
	 * increasing the bucket size as there was an indication of
	 * memory pressure on the system.
	 */
	lck_mtx_lock_spin(&cp->mc_sync_lock);
	if (!(cp->mc_flags & MCF_NOCPUCACHE) && cp->mc_enable_cnt) {
		need_bkt_reenable = 1;
	}
	lck_mtx_unlock(&cp->mc_sync_lock);

	MCACHE_LOCK(&cp->mc_bkt_lock);
	/*
	 * If the contention count is greater than the threshold, and if
	 * we are not already at the maximum bucket size, increase it.
	 * Otherwise, if this cache was previously purged by the user
	 * then we simply reenable it.
	 */
	if ((unsigned int)cp->mc_chunksize < cp->cache_bkttype->bt_maxbuf &&
	    (int)(cp->mc_bkt_contention - cp->mc_bkt_contention_prev) >
	    mcache_bkt_contention && !need_bkt_reenable) {
		need_bkt_resize = 1;
	}

	/* Snapshot contention so the next interval measures only the delta */
	cp->mc_bkt_contention_prev = cp->mc_bkt_contention;
	MCACHE_UNLOCK(&cp->mc_bkt_lock);

	/* Defer the actual work to the dispatch thread; it may block */
	if (need_bkt_resize) {
		mcache_dispatch(mcache_cache_bkt_resize, cp);
	} else if (need_bkt_reenable) {
		mcache_dispatch(mcache_cache_enable, cp);
	}
}
1456*2c2f96dcSApple OSS Distributions
/*
 * Recompute a cache's bucket size. This is an expensive operation
 * and should not be done frequently; larger buckets provide for a
 * higher transfer rate with the bucket while smaller buckets reduce
 * the memory consumption.
 */
static void
mcache_cache_bkt_resize(void *arg)
{
	mcache_t *cp = arg;
	mcache_bkttype_t *btp = cp->cache_bkttype;

	if ((unsigned int)cp->mc_chunksize < btp->bt_maxbuf) {
		/* Drain all buckets of the old size before switching */
		mcache_bkt_purge(cp);

		/*
		 * Upgrade to the next bucket type with larger bucket size;
		 * temporarily set the previous contention snapshot to a
		 * negative number to prevent unnecessary resize request.
		 *
		 * NOTE(review): "++btp" assumes cache_bkttype points into
		 * a contiguous, size-ordered table of bucket types whose
		 * last entry fails the bt_maxbuf test above; the table is
		 * not visible in this section -- confirm against its
		 * definition before changing.
		 */
		MCACHE_LOCK(&cp->mc_bkt_lock);
		cp->cache_bkttype = ++btp;
		cp->mc_bkt_contention_prev = cp->mc_bkt_contention + INT_MAX;
		MCACHE_UNLOCK(&cp->mc_bkt_lock);

		mcache_cache_enable(cp);
	}
}
1485*2c2f96dcSApple OSS Distributions
1486*2c2f96dcSApple OSS Distributions /*
1487*2c2f96dcSApple OSS Distributions * Reenable a previously disabled cache due to purge.
1488*2c2f96dcSApple OSS Distributions */
1489*2c2f96dcSApple OSS Distributions static void
mcache_cache_enable(void * arg)1490*2c2f96dcSApple OSS Distributions mcache_cache_enable(void *arg)
1491*2c2f96dcSApple OSS Distributions {
1492*2c2f96dcSApple OSS Distributions mcache_t *cp = arg;
1493*2c2f96dcSApple OSS Distributions
1494*2c2f96dcSApple OSS Distributions lck_mtx_lock_spin(&cp->mc_sync_lock);
1495*2c2f96dcSApple OSS Distributions cp->mc_purge_cnt = 0;
1496*2c2f96dcSApple OSS Distributions cp->mc_enable_cnt = 0;
1497*2c2f96dcSApple OSS Distributions lck_mtx_unlock(&cp->mc_sync_lock);
1498*2c2f96dcSApple OSS Distributions
1499*2c2f96dcSApple OSS Distributions mcache_cache_bkt_enable(cp);
1500*2c2f96dcSApple OSS Distributions }
1501*2c2f96dcSApple OSS Distributions
1502*2c2f96dcSApple OSS Distributions static void
mcache_update_timeout(__unused void * arg)1503*2c2f96dcSApple OSS Distributions mcache_update_timeout(__unused void *arg)
1504*2c2f96dcSApple OSS Distributions {
1505*2c2f96dcSApple OSS Distributions uint64_t deadline, leeway;
1506*2c2f96dcSApple OSS Distributions
1507*2c2f96dcSApple OSS Distributions clock_interval_to_deadline(mcache_reap_interval, NSEC_PER_SEC,
1508*2c2f96dcSApple OSS Distributions &deadline);
1509*2c2f96dcSApple OSS Distributions clock_interval_to_absolutetime_interval(mcache_reap_interval_leeway,
1510*2c2f96dcSApple OSS Distributions NSEC_PER_SEC, &leeway);
1511*2c2f96dcSApple OSS Distributions thread_call_enter_delayed_with_leeway(mcache_update_tcall, NULL,
1512*2c2f96dcSApple OSS Distributions deadline, leeway, THREAD_CALL_DELAY_LEEWAY);
1513*2c2f96dcSApple OSS Distributions }
1514*2c2f96dcSApple OSS Distributions
/*
 * Thread-call handler for the periodic update: run mcache_cache_update()
 * on every cache on the global list, then rearm the timer for the next
 * interval.
 */
static void
mcache_update(thread_call_param_t arg __unused,
    thread_call_param_t dummy __unused)
{
	mcache_applyall(mcache_cache_update);
	mcache_update_timeout(NULL);
}
1522*2c2f96dcSApple OSS Distributions
/*
 * Apply func() to each cache on the global mcache list.  Note that
 * func() is invoked with the list lock held for the entire walk.
 */
static void
mcache_applyall(void (*func)(mcache_t *))
{
	mcache_t *cp;

	MCACHE_LIST_LOCK();
	LIST_FOREACH(cp, &mcache_head, mc_list) {
		func(cp);
	}
	MCACHE_LIST_UNLOCK();
}
1534*2c2f96dcSApple OSS Distributions
/*
 * Schedule func(arg) to run asynchronously on the timeout (callout)
 * mechanism, hz / 1000 ticks from now (i.e. as soon as the callout
 * wheel allows).
 */
static void
mcache_dispatch(void (*func)(void *), void *arg)
{
	ASSERT(func != NULL);
	timeout(func, arg, hz / 1000);
}
1541*2c2f96dcSApple OSS Distributions
/*
 * Record a transaction (alloc or free) against a buffer in its audit
 * structure: capture the current thread, a PC backtrace, and a
 * millisecond timestamp relative to base_ts (or to 0 when base_ts is
 * NULL), then advance the transaction ring index.
 */
__private_extern__ void
mcache_buffer_log(mcache_audit_t *mca, void *addr, mcache_t *cp,
    struct timeval *base_ts)
{
	struct timeval now, base = { .tv_sec = 0, .tv_usec = 0 };
	void *stack[MCACHE_STACK_DEPTH + 1];
	struct mca_trn *transaction;

	/* Slot in the per-buffer transaction ring to overwrite */
	transaction = &mca->mca_trns[mca->mca_next_trn];

	mca->mca_addr = addr;
	mca->mca_cache = cp;

	transaction->mca_thread = current_thread();

	bzero(stack, sizeof(stack));
	/*
	 * Capture one extra frame and drop stack[0] (this function)
	 * when recording, hence the -1 on the depth.
	 */
	transaction->mca_depth = (uint16_t)OSBacktrace(stack, MCACHE_STACK_DEPTH + 1) - 1;
	bcopy(&stack[1], transaction->mca_stack,
	    sizeof(transaction->mca_stack));

	microuptime(&now);
	if (base_ts != NULL) {
		base = *base_ts;
	}
	/* tstamp is in ms relative to base_ts */
	transaction->mca_tstamp = ((now.tv_usec - base.tv_usec) / 1000);
	if ((now.tv_sec - base.tv_sec) > 0) {
		transaction->mca_tstamp += ((now.tv_sec - base.tv_sec) * 1000);
	}

	/* Advance the ring index for the next transaction */
	mca->mca_next_trn =
	    (mca->mca_next_trn + 1) % mca_trn_max;
}
1575*2c2f96dcSApple OSS Distributions
1576*2c2f96dcSApple OSS Distributions /*
1577*2c2f96dcSApple OSS Distributions * N.B.: mcache_set_pattern(), mcache_verify_pattern() and
1578*2c2f96dcSApple OSS Distributions * mcache_verify_set_pattern() are marked as noinline to prevent the
1579*2c2f96dcSApple OSS Distributions * compiler from aliasing pointers when they are inlined inside the callers
1580*2c2f96dcSApple OSS Distributions * (e.g. mcache_audit_free_verify_set()) which would be undefined behavior.
1581*2c2f96dcSApple OSS Distributions */
1582*2c2f96dcSApple OSS Distributions __private_extern__ OS_NOINLINE void
mcache_set_pattern(u_int64_t pattern,void * buf_arg,size_t size)1583*2c2f96dcSApple OSS Distributions mcache_set_pattern(u_int64_t pattern, void *buf_arg, size_t size)
1584*2c2f96dcSApple OSS Distributions {
1585*2c2f96dcSApple OSS Distributions u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1586*2c2f96dcSApple OSS Distributions u_int64_t *buf = (u_int64_t *)buf_arg;
1587*2c2f96dcSApple OSS Distributions
1588*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1589*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1590*2c2f96dcSApple OSS Distributions
1591*2c2f96dcSApple OSS Distributions while (buf < buf_end) {
1592*2c2f96dcSApple OSS Distributions *buf++ = pattern;
1593*2c2f96dcSApple OSS Distributions }
1594*2c2f96dcSApple OSS Distributions }
1595*2c2f96dcSApple OSS Distributions
1596*2c2f96dcSApple OSS Distributions __private_extern__ OS_NOINLINE void *
mcache_verify_pattern(u_int64_t pattern,void * buf_arg,size_t size)1597*2c2f96dcSApple OSS Distributions mcache_verify_pattern(u_int64_t pattern, void *buf_arg, size_t size)
1598*2c2f96dcSApple OSS Distributions {
1599*2c2f96dcSApple OSS Distributions u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1600*2c2f96dcSApple OSS Distributions u_int64_t *buf;
1601*2c2f96dcSApple OSS Distributions
1602*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1603*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1604*2c2f96dcSApple OSS Distributions
1605*2c2f96dcSApple OSS Distributions for (buf = buf_arg; buf < buf_end; buf++) {
1606*2c2f96dcSApple OSS Distributions if (*buf != pattern) {
1607*2c2f96dcSApple OSS Distributions return buf;
1608*2c2f96dcSApple OSS Distributions }
1609*2c2f96dcSApple OSS Distributions }
1610*2c2f96dcSApple OSS Distributions return NULL;
1611*2c2f96dcSApple OSS Distributions }
1612*2c2f96dcSApple OSS Distributions
1613*2c2f96dcSApple OSS Distributions OS_NOINLINE static void *
mcache_verify_set_pattern(u_int64_t old,u_int64_t new,void * buf_arg,size_t size)1614*2c2f96dcSApple OSS Distributions mcache_verify_set_pattern(u_int64_t old, u_int64_t new, void *buf_arg,
1615*2c2f96dcSApple OSS Distributions size_t size)
1616*2c2f96dcSApple OSS Distributions {
1617*2c2f96dcSApple OSS Distributions u_int64_t *buf_end = (u_int64_t *)((void *)((char *)buf_arg + size));
1618*2c2f96dcSApple OSS Distributions u_int64_t *buf;
1619*2c2f96dcSApple OSS Distributions
1620*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(buf_arg, sizeof(u_int64_t)));
1621*2c2f96dcSApple OSS Distributions VERIFY(IS_P2ALIGNED(size, sizeof(u_int64_t)));
1622*2c2f96dcSApple OSS Distributions
1623*2c2f96dcSApple OSS Distributions for (buf = buf_arg; buf < buf_end; buf++) {
1624*2c2f96dcSApple OSS Distributions if (*buf != old) {
1625*2c2f96dcSApple OSS Distributions mcache_set_pattern(old, buf_arg,
1626*2c2f96dcSApple OSS Distributions (uintptr_t)buf - (uintptr_t)buf_arg);
1627*2c2f96dcSApple OSS Distributions return buf;
1628*2c2f96dcSApple OSS Distributions }
1629*2c2f96dcSApple OSS Distributions *buf = new;
1630*2c2f96dcSApple OSS Distributions }
1631*2c2f96dcSApple OSS Distributions return NULL;
1632*2c2f96dcSApple OSS Distributions }
1633*2c2f96dcSApple OSS Distributions
/*
 * Verify that a free buffer still holds the free pattern throughout,
 * panicking on the first modified word.  The buffer's embedded
 * obj_next linkage (which legitimately differs from the pattern) is
 * saved, its word temporarily overwritten with the free pattern so the
 * whole region verifies, and restored afterwards.
 */
__private_extern__ void
mcache_audit_free_verify(mcache_audit_t *mca, void *base, size_t offset,
    size_t size)
{
	void *addr;
	u_int64_t *oaddr64;
	mcache_obj_t *next;

	addr = (void *)((uintptr_t)base + offset);
	next = ((mcache_obj_t *)addr)->obj_next;

	/* For the "obj_next" pointer in the buffer */
	oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t));
	*oaddr64 = MCACHE_FREE_PATTERN;

	if ((oaddr64 = mcache_verify_pattern(MCACHE_FREE_PATTERN,
	    (caddr_t)base, size)) != NULL) {
		mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
		    (int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
		/* NOTREACHED */
	}
	/* Restore the saved linkage pointer */
	((mcache_obj_t *)addr)->obj_next = next;
}
1657*2c2f96dcSApple OSS Distributions
/*
 * Like mcache_audit_free_verify(), but additionally rewrites the
 * buffer with the uninitialized pattern as it verifies (in one pass,
 * via mcache_verify_set_pattern()), in preparation for reuse.  The
 * embedded obj_next linkage is saved and restored around the scan.
 */
__private_extern__ void
mcache_audit_free_verify_set(mcache_audit_t *mca, void *base, size_t offset,
    size_t size)
{
	void *addr;
	u_int64_t *oaddr64;
	mcache_obj_t *next;

	addr = (void *)((uintptr_t)base + offset);
	next = ((mcache_obj_t *)addr)->obj_next;

	/* For the "obj_next" pointer in the buffer */
	oaddr64 = (u_int64_t *)P2ROUNDDOWN(addr, sizeof(u_int64_t));
	*oaddr64 = MCACHE_FREE_PATTERN;

	if ((oaddr64 = mcache_verify_set_pattern(MCACHE_FREE_PATTERN,
	    MCACHE_UNINITIALIZED_PATTERN, (caddr_t)base, size)) != NULL) {
		mcache_audit_panic(mca, addr, (caddr_t)oaddr64 - (caddr_t)base,
		    (int64_t)MCACHE_FREE_PATTERN, (int64_t)*oaddr64);
		/* NOTREACHED */
	}
	/* Restore the saved linkage pointer */
	((mcache_obj_t *)addr)->obj_next = next;
}
1681*2c2f96dcSApple OSS Distributions
/*
 * From here on we want the real kernel panic(); undo any earlier
 * macro redefinition of it (NOTE(review): presumably redefined above
 * in this file — confirm against the top of mcache.c).
 */
#undef panic

/* Format string for one recorded transaction: thread, depth, 16 PCs */
#define DUMP_TRN_FMT() \
	"%s transaction thread %p saved PC stack (%d deep):\n" \
	"\t%p, %p, %p, %p, %p, %p, %p, %p\n" \
	"\t%p, %p, %p, %p, %p, %p, %p, %p\n"

/* Argument list matching DUMP_TRN_FMT() for transaction slot x */
#define DUMP_TRN_FIELDS(s, x) \
	s, \
	mca->mca_trns[x].mca_thread, mca->mca_trns[x].mca_depth, \
	mca->mca_trns[x].mca_stack[0], mca->mca_trns[x].mca_stack[1], \
	mca->mca_trns[x].mca_stack[2], mca->mca_trns[x].mca_stack[3], \
	mca->mca_trns[x].mca_stack[4], mca->mca_trns[x].mca_stack[5], \
	mca->mca_trns[x].mca_stack[6], mca->mca_trns[x].mca_stack[7], \
	mca->mca_trns[x].mca_stack[8], mca->mca_trns[x].mca_stack[9], \
	mca->mca_trns[x].mca_stack[10], mca->mca_trns[x].mca_stack[11], \
	mca->mca_trns[x].mca_stack[12], mca->mca_trns[x].mca_stack[13], \
	mca->mca_trns[x].mca_stack[14], mca->mca_trns[x].mca_stack[15]

/*
 * Ring-slot indices relative to mca_next_trn.  MCA_TRN_LAST reduces
 * to mca_next_trn % mca_trn_max; MCA_TRN_PREV is the slot before it
 * (the + mca_trn_max keeps the subtraction non-negative).
 */
#define MCA_TRN_LAST ((mca->mca_next_trn + mca_trn_max) % mca_trn_max)
#define MCA_TRN_PREV ((mca->mca_next_trn + mca_trn_max - 1) % mca_trn_max)
1703*2c2f96dcSApple OSS Distributions
/*
 * Format an audit record — its buffer address, owning cache, and the
 * two most recent transactions with their saved backtraces — into the
 * caller-supplied buffer of DUMP_MCA_BUF_SIZE bytes.  Returns buf.
 */
__private_extern__ char *
mcache_dump_mca(char buf[static DUMP_MCA_BUF_SIZE], mcache_audit_t *mca)
{
	snprintf(buf, DUMP_MCA_BUF_SIZE,
	    "mca %p: addr %p, cache %p (%s) nxttrn %d\n"
	    DUMP_TRN_FMT()
	    DUMP_TRN_FMT(),

	    mca, mca->mca_addr, mca->mca_cache,
	    mca->mca_cache ? mca->mca_cache->mc_name : "?",
	    mca->mca_next_trn,

	    DUMP_TRN_FIELDS("last", MCA_TRN_LAST),
	    DUMP_TRN_FIELDS("previous", MCA_TRN_PREV));

	return buf;
}
1721*2c2f96dcSApple OSS Distributions
/*
 * Panic with diagnostics describing a buffer that was modified after
 * free: the buffer address, the offset of the corrupted word, and the
 * expected vs. observed values.  When audit metadata is available,
 * the formatted transaction history is appended to the panic string.
 * Never returns.
 */
__attribute__((noreturn))
static void
mcache_audit_panic(mcache_audit_t *mca, void *addr, size_t offset,
    int64_t expected, int64_t got)
{
	char buf[DUMP_MCA_BUF_SIZE];

	/* No audit record: panic with the raw corruption info only */
	if (mca == NULL) {
		panic("mcache_audit: buffer %p modified after free at "
		    "offset 0x%lx (0x%llx instead of 0x%llx)\n", addr,
		    offset, got, expected);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	panic("mcache_audit: buffer %p modified after free at offset 0x%lx "
	    "(0x%llx instead of 0x%llx)\n%s\n",
	    addr, offset, got, expected, mcache_dump_mca(buf, mca));
	/* NOTREACHED */
	__builtin_unreachable();
}
1743