1 /*
2 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #define _FN_KPRINTF
31 #include <pexpert/pexpert.h> /* for PE_parse_boot_argn */
32 #include <libkern/OSDebug.h> /* for OSBacktrace */
33 #include <kern/sched_prim.h> /* for assert_wait */
34
35 /*
36 * Memory allocator with per-CPU caching (magazines), derived from the kmem
37 * magazine concept and implementation as described in the following paper:
38 * http://www.usenix.org/events/usenix01/full_papers/bonwick/bonwick.pdf
39 *
40 * That implementation is Copyright 2006 Sun Microsystems, Inc. All rights
41 * reserved. Use is subject to license terms.
42 *
43 * This derivative differs from the original kmem slab allocator, in that:
44 *
45 * a) There is always a discrete bufctl per object, even for small sizes.
46 * This increases the overhead, but is necessary as Skywalk objects
47 * coming from the slab may be shared (RO or RW) with userland; therefore
48 * embedding the KVA pointer linkage in freed objects is a non-starter.
49 *
50 * b) Writing patterns to the slab at slab creation or destruction time
51 * (when debugging is enabled) is not implemented, as the object may
52 * be shared (RW) with userland and thus we cannot panic upon pattern
53 * mismatch episodes. This can be relaxed so that we conditionally
54 * verify the pattern for kernel-only memory.
55 *
56 * This derivative also differs from Darwin's mcache allocator (which itself
57 * is a derivative of the original kmem slab allocator), in that:
58 *
59 * 1) The slab layer is internal to skmem_cache, unlike mcache's external
60 * slab layer required to support mbufs. skmem_cache also supports
61 * constructing and deconstructing objects, while mcache does not.
62 * This brings skmem_cache's model closer to that of the original
63 * kmem slab allocator.
64 *
65 * 2) mcache allows for batch allocation and free by way of chaining the
66 * objects together using a linked list. This requires using a part
67 * of the object to act as the linkage, which is against Skywalk's
68 * requirements of not exposing any KVA pointer to userland. Although
69 * this is supported by skmem_cache, chaining is only possible if the
70 * region is not mapped to userland. That implies that kernel-only
71 * objects can be chained provided the cache is created with batching
72 * mode enabled, and that the object is large enough to contain the
73 * skmem_obj structure.
74 *
75 * In other words, skmem_cache is a hybrid of a hybrid custom allocator that
76 * implements features that are required by Skywalk. In addition to being
77  * aware of userland access to the buffers, it also supports mirrored backend
78 * memory regions. This allows a cache to manage two independent memory
79 * regions, such that allocating/freeing an object from/to one results in
80 * allocating/freeing a shadow object in another, thus guaranteeing that both
81 * objects share the same lifetime.
82 */
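
/*
 * A minimal usage sketch (illustrative only; "example_obj" and the chosen
 * flag values are hypothetical placeholders, not taken from any particular
 * client of this allocator):
 *
 *	struct skmem_cache *skm;
 *	struct example_obj *obj;
 *
 *	skm = skmem_cache_create("example", sizeof(struct example_obj),
 *	    sizeof(uint64_t), NULL, NULL, NULL, NULL, NULL, 0);
 *	obj = skmem_cache_alloc(skm, SKMEM_SLEEP);
 *	...
 *	skmem_cache_free(skm, obj);
 *	skmem_cache_destroy(skm);
 *
 * Passing a NULL region (as above) creates a pseudo, zone-backed cache;
 * callers with shared or mirrored memory requirements pass an actual
 * skmem_region instead, and may set SKMEM_CR_BATCH on kernel-only regions
 * to enable chained alloc/free.
 */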
83
84 static uint32_t ncpu; /* total # of initialized CPUs */
85
86 static LCK_MTX_DECLARE_ATTR(skmem_cache_lock, &skmem_lock_grp, &skmem_lock_attr);
87 static struct thread *skmem_lock_owner = THREAD_NULL;
88
89 static LCK_GRP_DECLARE(skmem_sl_lock_grp, "skmem_slab");
90 static LCK_GRP_DECLARE(skmem_dp_lock_grp, "skmem_depot");
91 static LCK_GRP_DECLARE(skmem_cpu_lock_grp, "skmem_cpu_cache");
92
93 #define SKMEM_CACHE_LOCK() do { \
94 lck_mtx_lock(&skmem_cache_lock); \
95 skmem_lock_owner = current_thread(); \
96 } while (0)
97 #define SKMEM_CACHE_UNLOCK() do { \
98 skmem_lock_owner = THREAD_NULL; \
99 lck_mtx_unlock(&skmem_cache_lock); \
100 } while (0)
101 #define SKMEM_CACHE_LOCK_ASSERT_HELD() \
102 LCK_MTX_ASSERT(&skmem_cache_lock, LCK_MTX_ASSERT_OWNED)
103 #define SKMEM_CACHE_LOCK_ASSERT_NOTHELD() \
104 LCK_MTX_ASSERT(&skmem_cache_lock, LCK_MTX_ASSERT_NOTOWNED)
105
106 #define SKM_SLAB_LOCK(_skm) \
107 lck_mtx_lock(&(_skm)->skm_sl_lock)
108 #define SKM_SLAB_LOCK_ASSERT_HELD(_skm) \
109 LCK_MTX_ASSERT(&(_skm)->skm_sl_lock, LCK_MTX_ASSERT_OWNED)
110 #define SKM_SLAB_LOCK_ASSERT_NOTHELD(_skm) \
111 LCK_MTX_ASSERT(&(_skm)->skm_sl_lock, LCK_MTX_ASSERT_NOTOWNED)
112 #define SKM_SLAB_UNLOCK(_skm) \
113 lck_mtx_unlock(&(_skm)->skm_sl_lock)
114
115 #define SKM_DEPOT_LOCK(_skm) \
116 lck_mtx_lock(&(_skm)->skm_dp_lock)
117 #define SKM_DEPOT_LOCK_SPIN(_skm) \
118 lck_mtx_lock_spin(&(_skm)->skm_dp_lock)
119 #define SKM_DEPOT_CONVERT_LOCK(_skm) \
120 lck_mtx_convert_spin(&(_skm)->skm_dp_lock)
121 #define SKM_DEPOT_LOCK_TRY(_skm) \
122 lck_mtx_try_lock(&(_skm)->skm_dp_lock)
123 #define SKM_DEPOT_LOCK_ASSERT_HELD(_skm) \
124 LCK_MTX_ASSERT(&(_skm)->skm_dp_lock, LCK_MTX_ASSERT_OWNED)
125 #define SKM_DEPOT_LOCK_ASSERT_NOTHELD(_skm) \
126 LCK_MTX_ASSERT(&(_skm)->skm_dp_lock, LCK_MTX_ASSERT_NOTOWNED)
127 #define SKM_DEPOT_UNLOCK(_skm) \
128 lck_mtx_unlock(&(_skm)->skm_dp_lock)
129
130 #define SKM_RESIZE_LOCK(_skm) \
131 lck_mtx_lock(&(_skm)->skm_rs_lock)
132 #define SKM_RESIZE_LOCK_ASSERT_HELD(_skm) \
133 LCK_MTX_ASSERT(&(_skm)->skm_rs_lock, LCK_MTX_ASSERT_OWNED)
134 #define SKM_RESIZE_LOCK_ASSERT_NOTHELD(_skm) \
135 LCK_MTX_ASSERT(&(_skm)->skm_rs_lock, LCK_MTX_ASSERT_NOTOWNED)
136 #define SKM_RESIZE_UNLOCK(_skm) \
137 lck_mtx_unlock(&(_skm)->skm_rs_lock)
138
139 #define SKM_CPU_LOCK(_cp) \
140 lck_mtx_lock(&(_cp)->cp_lock)
141 #define SKM_CPU_LOCK_SPIN(_cp) \
142 lck_mtx_lock_spin(&(_cp)->cp_lock)
143 #define SKM_CPU_CONVERT_LOCK(_cp) \
144 lck_mtx_convert_spin(&(_cp)->cp_lock)
145 #define SKM_CPU_LOCK_ASSERT_HELD(_cp) \
146 LCK_MTX_ASSERT(&(_cp)->cp_lock, LCK_MTX_ASSERT_OWNED)
147 #define SKM_CPU_LOCK_ASSERT_NOTHELD(_cp) \
148 LCK_MTX_ASSERT(&(_cp)->cp_lock, LCK_MTX_ASSERT_NOTOWNED)
149 #define SKM_CPU_UNLOCK(_cp) \
150 lck_mtx_unlock(&(_cp)->cp_lock)
151
152 #define SKM_ZONE_MAX 256
153
154 static struct zone *skm_zone; /* zone for skmem_cache */
155
156 static struct skmem_cache *skmem_slab_cache; /* cache for skmem_slab */
157 static struct skmem_cache *skmem_bufctl_cache; /* cache for skmem_bufctl */
158 static unsigned int bc_size; /* size of bufctl */
159
160 /*
161 * Magazine types (one per row.)
162 *
163 * The first column defines the number of objects that the magazine can hold.
164 * Using that number, we derive the effective number: the aggregate count of
165 * object pointers, plus 2 pointers (skmem_mag linkage + magazine type).
166 * This would result in an object size that is aligned on the CPU cache
167 * size boundary; the exception to this is the KASAN mode where the size
168 * would be larger due to the redzone regions.
169 *
170 * The second column defines the alignment of the magazine. Because each
171 * magazine is used at the CPU-layer cache, we need to ensure there is no
172 * false sharing across the CPUs, and align the magazines to the maximum
173 * cache alignment size, for simplicity. The value of 0 may be used to
174 * indicate natural pointer size alignment.
175 *
176 * The third column defines the starting magazine type for a given cache,
177 * determined at the cache's creation time based on its chunk size.
178 *
179 * The fourth column defines the magazine type limit for a given cache.
180 * Magazine resizing will only occur if the chunk size is less than this.
181 */
182 static struct skmem_magtype skmem_magtype[] = {
183 #if defined(__LP64__)
184 { .mt_magsize = 14, .mt_align = 0, .mt_minbuf = 128, .mt_maxbuf = 512,
185 .mt_cache = NULL, .mt_cname = "" },
186 { .mt_magsize = 30, .mt_align = 0, .mt_minbuf = 96, .mt_maxbuf = 256,
187 .mt_cache = NULL, .mt_cname = "" },
188 { .mt_magsize = 46, .mt_align = 0, .mt_minbuf = 64, .mt_maxbuf = 128,
189 .mt_cache = NULL, .mt_cname = "" },
190 { .mt_magsize = 62, .mt_align = 0, .mt_minbuf = 32, .mt_maxbuf = 64,
191 .mt_cache = NULL, .mt_cname = "" },
192 { .mt_magsize = 94, .mt_align = 0, .mt_minbuf = 16, .mt_maxbuf = 32,
193 .mt_cache = NULL, .mt_cname = "" },
194 { .mt_magsize = 126, .mt_align = 0, .mt_minbuf = 8, .mt_maxbuf = 16,
195 .mt_cache = NULL, .mt_cname = "" },
196 { .mt_magsize = 142, .mt_align = 0, .mt_minbuf = 0, .mt_maxbuf = 8,
197 .mt_cache = NULL, .mt_cname = "" },
198 { .mt_magsize = 158, .mt_align = 0, .mt_minbuf = 0, .mt_maxbuf = 0,
199 .mt_cache = NULL, .mt_cname = "" },
200 #else /* !__LP64__ */
201 { .mt_magsize = 14, .mt_align = 0, .mt_minbuf = 0, .mt_maxbuf = 0,
202 .mt_cache = NULL, .mt_cname = "" },
203 #endif /* !__LP64__ */
204 };
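
/*
 * Worked example for the table above (illustrative, assuming LP64 and the
 * "aggregate count of object pointers, plus 2 pointers" accounting described
 * earlier): the first row, mt_magsize 14, yields a magazine object of
 * (14 + 2) * sizeof(void *) = 128 bytes, and mt_magsize 30 yields
 * (30 + 2) * 8 = 256 bytes; both are multiples of common 64- and 128-byte
 * CPU cache line sizes, as intended.
 */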
205
206 /*
207 * Hash table bounds. Start with the initial value, and rescale up to
208 * the specified limit. Ideally we don't need a limit, but in practice
209 * this helps guard against runaways. These values should be revisited
210 * in future and be adjusted as needed.
211 */
212 #define SKMEM_CACHE_HASH_INITIAL 64 /* initial hash table size */
213 #define SKMEM_CACHE_HASH_LIMIT 8192 /* hash table size limit */
214
215 #define SKMEM_CACHE_HASH_INDEX(_a, _s, _m) (((_a) >> (_s)) & (_m))
216 #define SKMEM_CACHE_HASH(_skm, _buf) \
217 (&(_skm)->skm_hash_table[SKMEM_CACHE_HASH_INDEX((uintptr_t)_buf, \
218 (_skm)->skm_hash_shift, (_skm)->skm_hash_mask)])
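
/*
 * For example (illustrative values only): a cache with a 2048-byte chunk
 * size gets skm_hash_shift = flsll(2048) - 1 = 11; with the initial
 * 64-bucket table (skm_hash_mask = 63), an object at address A lands in
 * bucket ((A >> 11) & 63).  Shifting by the chunk size first spreads
 * adjacent objects of a slab across different buckets.
 */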
219
220 /*
221 * The last magazine type.
222 */
223 static struct skmem_magtype *skmem_cache_magsize_last;
224
225 static TAILQ_HEAD(, skmem_cache) skmem_cache_head;
226 static boolean_t skmem_cache_ready;
227
228 static int skmem_slab_alloc_locked(struct skmem_cache *,
229 struct skmem_obj_info *, struct skmem_obj_info *, uint32_t);
230 static void skmem_slab_free_locked(struct skmem_cache *, void *);
231 static int skmem_slab_alloc_pseudo_locked(struct skmem_cache *,
232 struct skmem_obj_info *, struct skmem_obj_info *, uint32_t);
233 static void skmem_slab_free_pseudo_locked(struct skmem_cache *, void *);
234 static struct skmem_slab *skmem_slab_create(struct skmem_cache *, uint32_t);
235 static void skmem_slab_destroy(struct skmem_cache *, struct skmem_slab *);
236 static int skmem_magazine_ctor(struct skmem_obj_info *,
237 struct skmem_obj_info *, void *, uint32_t);
238 static void skmem_magazine_destroy(struct skmem_cache *, struct skmem_mag *,
239 int);
240 static uint32_t skmem_depot_batch_alloc(struct skmem_cache *,
241 struct skmem_maglist *, uint32_t *, struct skmem_mag **, uint32_t);
242 static void skmem_depot_batch_free(struct skmem_cache *, struct skmem_maglist *,
243 uint32_t *, struct skmem_mag *);
244 static void skmem_depot_ws_update(struct skmem_cache *);
245 static void skmem_depot_ws_zero(struct skmem_cache *);
246 static void skmem_depot_ws_reap(struct skmem_cache *);
247 static void skmem_cache_magazine_purge(struct skmem_cache *);
248 static void skmem_cache_magazine_enable(struct skmem_cache *, uint32_t);
249 static void skmem_cache_magazine_resize(struct skmem_cache *);
250 static void skmem_cache_hash_rescale(struct skmem_cache *);
251 static void skmem_cpu_reload(struct skmem_cpu_cache *, struct skmem_mag *, int);
252 static void skmem_cpu_batch_reload(struct skmem_cpu_cache *,
253 struct skmem_mag *, int);
254 static void skmem_cache_applyall(void (*)(struct skmem_cache *, uint32_t),
255 uint32_t);
256 static void skmem_cache_reclaim(struct skmem_cache *, uint32_t);
257 static void skmem_cache_reap_start(void);
258 static void skmem_cache_reap_done(void);
259 static void skmem_cache_reap_func(thread_call_param_t, thread_call_param_t);
260 static void skmem_cache_update_func(thread_call_param_t, thread_call_param_t);
261 static int skmem_cache_resize_enter(struct skmem_cache *, boolean_t);
262 static void skmem_cache_resize_exit(struct skmem_cache *);
263 static void skmem_audit_bufctl(struct skmem_bufctl *);
264 static void skmem_audit_buf(struct skmem_cache *, struct skmem_obj *);
265 static int skmem_cache_mib_get_sysctl SYSCTL_HANDLER_ARGS;
266
267 SYSCTL_PROC(_kern_skywalk_stats, OID_AUTO, cache,
268 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
269 0, 0, skmem_cache_mib_get_sysctl, "S,sk_stats_cache",
270 "Skywalk cache statistics");
271
272 static volatile uint32_t skmem_cache_reaping;
273 static thread_call_t skmem_cache_reap_tc;
274 static thread_call_t skmem_cache_update_tc;
275
276 extern kern_return_t thread_terminate(thread_t);
277 extern unsigned int ml_wait_max_cpus(void);
278
279 #define SKMEM_DEBUG_NOMAGAZINES 0x1 /* disable magazines layer */
280 #define SKMEM_DEBUG_AUDIT 0x2 /* audit transactions */
281 #define SKMEM_DEBUG_MASK (SKMEM_DEBUG_NOMAGAZINES|SKMEM_DEBUG_AUDIT)
282
283 #if DEBUG
284 static uint32_t skmem_debug = SKMEM_DEBUG_AUDIT;
285 #else /* !DEBUG */
286 static uint32_t skmem_debug = 0;
287 #endif /* !DEBUG */
288
289 static uint32_t skmem_clear_min = 0; /* clear on free threshold */
290
291 #define SKMEM_CACHE_UPDATE_INTERVAL 11 /* 11 seconds */
292 static uint32_t skmem_cache_update_interval = SKMEM_CACHE_UPDATE_INTERVAL;
293
294 #define SKMEM_DEPOT_CONTENTION 3 /* max failed trylock per interval */
295 static int skmem_cache_depot_contention = SKMEM_DEPOT_CONTENTION;
296
297 /*
298 * Too big a value will cause overflow and thus trip the assertion; the
299 * idea here is to set an upper limit for the time that a particular
300 * thread is allowed to perform retries before we give up and panic.
301 */
302 #define SKMEM_SLAB_MAX_BACKOFF (20 * USEC_PER_SEC) /* 20 sec, in usec */
303
304 /*
305 * Threshold (in msec) after which we reset the exponential backoff value
306 * back to its (random) initial value. Note that we allow the actual delay
307 * to be at most twice this value.
308 */
309 #define SKMEM_SLAB_BACKOFF_THRES 1024 /* up to ~2 sec (2048 msec) */
310
311 /*
312 * To reduce the likelihood of global synchronization between threads,
313 * we use some random value to start the exponential backoff.
314 */
315 #define SKMEM_SLAB_BACKOFF_RANDOM 4 /* range is [1,4] msec */
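
/*
 * Sketch of the resulting retry behavior in skmem_slab_alloc_locked()
 * (illustrative; the starting value is randomized per thread): a sleeping
 * allocation that keeps failing might wait 3, 6, 12, ..., 1536 msec,
 * re-randomize its delay once the per-step value exceeds
 * SKMEM_SLAB_BACKOFF_THRES, and finally panic once the accumulated wait
 * crosses SKMEM_SLAB_MAX_BACKOFF (20 seconds).
 */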
316
317 #if (DEVELOPMENT || DEBUG)
318 SYSCTL_UINT(_kern_skywalk_mem, OID_AUTO, cache_update_interval,
319 CTLFLAG_RW | CTLFLAG_LOCKED, &skmem_cache_update_interval,
320 SKMEM_CACHE_UPDATE_INTERVAL, "Cache update interval");
321 SYSCTL_INT(_kern_skywalk_mem, OID_AUTO, cache_depot_contention,
322 CTLFLAG_RW | CTLFLAG_LOCKED, &skmem_cache_depot_contention,
323 SKMEM_DEPOT_CONTENTION, "Depot contention");
324
325 static uint32_t skmem_cache_update_interval_saved = SKMEM_CACHE_UPDATE_INTERVAL;
326
327 /*
328 * Called by skmem_test_start() to set the update interval.
329 */
330 void
331 skmem_cache_test_start(uint32_t i)
332 {
333 skmem_cache_update_interval_saved = skmem_cache_update_interval;
334 skmem_cache_update_interval = i;
335 }
336
337 /*
338 * Called by skmem_test_stop() to restore the update interval.
339 */
340 void
341 skmem_cache_test_stop(void)
342 {
343 skmem_cache_update_interval = skmem_cache_update_interval_saved;
344 }
345 #endif /* (DEVELOPMENT || DEBUG) */
346
347 #define SKMEM_TAG_BUFCTL_HASH "com.apple.skywalk.bufctl.hash"
348 static SKMEM_TAG_DEFINE(skmem_tag_bufctl_hash, SKMEM_TAG_BUFCTL_HASH);
349
350 #define SKMEM_TAG_CACHE_MIB "com.apple.skywalk.cache.mib"
351 static SKMEM_TAG_DEFINE(skmem_tag_cache_mib, SKMEM_TAG_CACHE_MIB);
352
353 static int __skmem_cache_pre_inited = 0;
354 static int __skmem_cache_inited = 0;
355
356 /*
357 * Called before skmem_region_init().
358 */
359 void
360 skmem_cache_pre_init(void)
361 {
362 vm_size_t skm_size;
363
364 ASSERT(!__skmem_cache_pre_inited);
365
366 ncpu = ml_wait_max_cpus();
367
368 /* allocate extra in case we need to manually align the pointer */
369 if (skm_zone == NULL) {
370 skm_size = SKMEM_CACHE_SIZE(ncpu);
371 #if KASAN
372 /*
373 * When KASAN is enabled, the zone allocator adjusts the
374 * element size to include the redzone regions, in which
375 * case we assume that the elements won't start on the
376 * alignment boundary and thus need to do some fix-ups.
377 * These include increasing the effective object size
378 * which adds at least 136 bytes to the original size,
379 * as computed by skmem_region_params_config() above.
380 */
381 skm_size += (sizeof(void *) + CHANNEL_CACHE_ALIGN_MAX);
382 #endif /* KASAN */
383 skm_size = P2ROUNDUP(skm_size, CHANNEL_CACHE_ALIGN_MAX);
384 skm_zone = zone_create(SKMEM_ZONE_PREFIX ".skm", skm_size,
385 ZC_PGZ_USE_GUARDS | ZC_ZFREE_CLEARMEM | ZC_DESTRUCTIBLE);
386 }
387
388 TAILQ_INIT(&skmem_cache_head);
389
390 __skmem_cache_pre_inited = 1;
391 }
392
393 /*
394 * Called after skmem_region_init().
395 */
396 void
397 skmem_cache_init(void)
398 {
399 uint32_t cpu_cache_line_size = skmem_cpu_cache_line_size();
400 struct skmem_magtype *mtp;
401 uint32_t i;
402
403 _CASSERT(SKMEM_CACHE_HASH_LIMIT >= SKMEM_CACHE_HASH_INITIAL);
404
405 _CASSERT(SKM_MODE_NOMAGAZINES == SCA_MODE_NOMAGAZINES);
406 _CASSERT(SKM_MODE_AUDIT == SCA_MODE_AUDIT);
407 _CASSERT(SKM_MODE_NOREDIRECT == SCA_MODE_NOREDIRECT);
408 _CASSERT(SKM_MODE_BATCH == SCA_MODE_BATCH);
409 _CASSERT(SKM_MODE_DYNAMIC == SCA_MODE_DYNAMIC);
410 _CASSERT(SKM_MODE_CLEARONFREE == SCA_MODE_CLEARONFREE);
411 _CASSERT(SKM_MODE_PSEUDO == SCA_MODE_PSEUDO);
412
413 ASSERT(__skmem_cache_pre_inited);
414 ASSERT(!__skmem_cache_inited);
415
416 PE_parse_boot_argn("skmem_debug", &skmem_debug, sizeof(skmem_debug));
417 skmem_debug &= SKMEM_DEBUG_MASK;
418
419 #if (DEVELOPMENT || DEBUG)
420 PE_parse_boot_argn("skmem_clear_min", &skmem_clear_min,
421 sizeof(skmem_clear_min));
422 #endif /* (DEVELOPMENT || DEBUG) */
423 if (skmem_clear_min == 0) {
424 /* zeroing 2 CPU cache lines practically comes for free */
425 skmem_clear_min = 2 * cpu_cache_line_size;
426 } else {
427 /* round it up to CPU cache line size */
428 skmem_clear_min = (uint32_t)P2ROUNDUP(skmem_clear_min,
429 cpu_cache_line_size);
430 }
431
432 /* create a cache for buffer control structures */
433 if (skmem_debug & SKMEM_DEBUG_AUDIT) {
434 bc_size = sizeof(struct skmem_bufctl_audit);
435 skmem_bufctl_cache = skmem_cache_create("bufctl.audit",
436 bc_size, sizeof(uint64_t), NULL, NULL,
437 NULL, NULL, NULL, 0);
438 } else {
439 bc_size = sizeof(struct skmem_bufctl);
440 skmem_bufctl_cache = skmem_cache_create("bufctl",
441 bc_size, sizeof(uint64_t), NULL, NULL,
442 NULL, NULL, NULL, 0);
443 }
444
445 /* create a cache for slab structures */
446 skmem_slab_cache = skmem_cache_create("slab",
447 sizeof(struct skmem_slab), sizeof(uint64_t), NULL, NULL, NULL,
448 NULL, NULL, 0);
449
450 /*
451 * Go through the magazine type table and create a cache for each.
452 */
453 for (i = 0; i < sizeof(skmem_magtype) / sizeof(*mtp); i++) {
454 mtp = &skmem_magtype[i];
455
456 if (mtp->mt_align != 0 &&
457 ((mtp->mt_align & (mtp->mt_align - 1)) != 0 ||
458 mtp->mt_align < (int)cpu_cache_line_size)) {
459 panic("%s: bad alignment %d", __func__, mtp->mt_align);
460 /* NOTREACHED */
461 __builtin_unreachable();
462 }
463 (void) snprintf(mtp->mt_cname, sizeof(mtp->mt_cname),
464 "mg.%d", mtp->mt_magsize);
465
466 /* create a cache for this magazine type */
467 mtp->mt_cache = skmem_cache_create(mtp->mt_cname,
468 SKMEM_MAG_SIZE(mtp->mt_magsize), mtp->mt_align,
469 skmem_magazine_ctor, NULL, NULL, mtp, NULL, 0);
470
471 /* remember the last magazine type */
472 skmem_cache_magsize_last = mtp;
473 }
474
475 VERIFY(skmem_cache_magsize_last != NULL);
476 VERIFY(skmem_cache_magsize_last->mt_minbuf == 0);
477 VERIFY(skmem_cache_magsize_last->mt_maxbuf == 0);
478
479 /*
480 * Allocate thread calls for cache reap and update operations.
481 */
482 skmem_cache_reap_tc =
483 thread_call_allocate_with_options(skmem_cache_reap_func,
484 NULL, THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
485 skmem_cache_update_tc =
486 thread_call_allocate_with_options(skmem_cache_update_func,
487 NULL, THREAD_CALL_PRIORITY_KERNEL, THREAD_CALL_OPTIONS_ONCE);
488 if (skmem_cache_reap_tc == NULL || skmem_cache_update_tc == NULL) {
489 panic("%s: thread_call_allocate failed", __func__);
490 /* NOTREACHED */
491 __builtin_unreachable();
492 }
493
494 /*
495 * We're ready; go through existing skmem_cache entries
496 * (if any) and enable the magazines layer for each.
497 */
498 skmem_cache_applyall(skmem_cache_magazine_enable, 0);
499 skmem_cache_ready = TRUE;
500
501 /* and start the periodic cache update machinery */
502 skmem_dispatch(skmem_cache_update_tc, NULL,
503 (skmem_cache_update_interval * NSEC_PER_SEC));
504
505 __skmem_cache_inited = 1;
506 }
507
508 void
509 skmem_cache_fini(void)
510 {
511 struct skmem_magtype *mtp;
512 uint32_t i;
513
514 if (__skmem_cache_inited) {
515 ASSERT(TAILQ_EMPTY(&skmem_cache_head));
516
517 for (i = 0; i < sizeof(skmem_magtype) / sizeof(*mtp); i++) {
518 mtp = &skmem_magtype[i];
519 skmem_cache_destroy(mtp->mt_cache);
520 mtp->mt_cache = NULL;
521 }
522 skmem_cache_destroy(skmem_slab_cache);
523 skmem_slab_cache = NULL;
524 skmem_cache_destroy(skmem_bufctl_cache);
525 skmem_bufctl_cache = NULL;
526
527 if (skmem_cache_reap_tc != NULL) {
528 (void) thread_call_cancel_wait(skmem_cache_reap_tc);
529 (void) thread_call_free(skmem_cache_reap_tc);
530 skmem_cache_reap_tc = NULL;
531 }
532 if (skmem_cache_update_tc != NULL) {
533 (void) thread_call_cancel_wait(skmem_cache_update_tc);
534 (void) thread_call_free(skmem_cache_update_tc);
535 skmem_cache_update_tc = NULL;
536 }
537
538 __skmem_cache_inited = 0;
539 }
540
541 if (__skmem_cache_pre_inited) {
542 if (skm_zone != NULL) {
543 zdestroy(skm_zone);
544 skm_zone = NULL;
545 }
546
547 __skmem_cache_pre_inited = 0;
548 }
549 }
550
551 /*
552 * Create a cache.
553 */
554 struct skmem_cache *
555 skmem_cache_create(const char *name, size_t bufsize, size_t bufalign,
556 skmem_ctor_fn_t ctor, skmem_dtor_fn_t dtor, skmem_reclaim_fn_t reclaim,
557 void *private, struct skmem_region *region, uint32_t cflags)
558 {
559 boolean_t pseudo = (region == NULL);
560 struct skmem_magtype *mtp;
561 struct skmem_cache *skm;
562 void *buf;
563 size_t segsize;
564 size_t chunksize;
565 size_t objsize;
566 size_t objalign;
567 uint32_t i, cpuid;
568
569 /* enforce 64-bit minimum alignment for buffers */
570 if (bufalign == 0) {
571 bufalign = SKMEM_CACHE_ALIGN;
572 }
573 bufalign = P2ROUNDUP(bufalign, SKMEM_CACHE_ALIGN);
574
575 /* enforce alignment to be a power of 2 */
576 VERIFY(powerof2(bufalign));
577
578 if (region == NULL) {
579 struct skmem_region_params srp;
580
581 /* batching is currently not supported on pseudo regions */
582 VERIFY(!(cflags & SKMEM_CR_BATCH));
583
584 srp = *skmem_get_default(SKMEM_REGION_INTRINSIC);
585 ASSERT(srp.srp_cflags == SKMEM_REGION_CR_PSEUDO);
586
587 /* objalign is always equal to bufalign */
588 srp.srp_align = objalign = bufalign;
589 srp.srp_r_obj_cnt = 1;
590 srp.srp_r_obj_size = (uint32_t)bufsize;
591 skmem_region_params_config(&srp);
592
593 /* allocate region for intrinsics */
594 region = skmem_region_create(name, &srp, NULL, NULL, NULL);
595 VERIFY(region->skr_c_obj_size >= P2ROUNDUP(bufsize, bufalign));
596 VERIFY(objalign == region->skr_align);
597 #if KASAN
598 /*
599 * When KASAN is enabled, the zone allocator adjusts the
600 * element size to include the redzone regions, in which
601 * case we assume that the elements won't start on the
602 * alignment boundary and thus need to do some fix-ups.
603 * These include increasing the effective object size
604 * which adds at least 16 bytes to the original size,
605 * as computed by skmem_region_params_config() above.
606 */
607 VERIFY(region->skr_c_obj_size >=
608 (bufsize + sizeof(uint64_t) + bufalign));
609 #endif /* KASAN */
610 /* enable magazine resizing by default */
611 cflags |= SKMEM_CR_DYNAMIC;
612
613 /*
614 * For consistency with ZC_ZFREE_CLEARMEM on skr->zreg,
615 * even though it's a no-op since the work is done
616 * at the zone layer instead.
617 */
618 cflags |= SKMEM_CR_CLEARONFREE;
619 } else {
620 objalign = region->skr_align;
621 }
622
623 ASSERT(region != NULL);
624 ASSERT(!(region->skr_mode & SKR_MODE_MIRRORED));
625 segsize = region->skr_seg_size;
626 ASSERT(bufalign <= segsize);
627
628 buf = zalloc_flags(skm_zone, Z_WAITOK | Z_ZERO);
629 #if KASAN
630 /*
631 * In case we didn't get cache-aligned memory, round it up
632 * accordingly. This is needed in order to get the rest of the
633 * structure members aligned properly. It also means that
634 * the memory span gets shifted due to the round up, but it
635 * is okay since we've allocated extra space for this.
636 */
637 skm = (struct skmem_cache *)
638 P2ROUNDUP((intptr_t)buf + sizeof(void *), CHANNEL_CACHE_ALIGN_MAX);
639 void **pbuf = (void **)((intptr_t)skm - sizeof(void *));
640 *pbuf = buf;
641 #else /* !KASAN */
642 /*
643 * We expect that the zone allocator would allocate elements
644 * rounded up to the requested alignment based on the object
645 * size computed in skmem_cache_pre_init() earlier, and
646 * 'skm' is therefore the element address itself.
647 */
648 skm = buf;
649 #endif /* !KASAN */
650 VERIFY(IS_P2ALIGNED(skm, CHANNEL_CACHE_ALIGN_MAX));
651
652 if ((skmem_debug & SKMEM_DEBUG_NOMAGAZINES) ||
653 (cflags & SKMEM_CR_NOMAGAZINES)) {
654 /*
655 * Either the caller insists that this cache should not
656 * utilize magazines layer, or that the system override
657 * to disable magazines layer on all caches has been set.
658 */
659 skm->skm_mode |= SKM_MODE_NOMAGAZINES;
660 } else {
661 /*
662 * Region must be configured with enough objects
663 * to take into account objects at the CPU layer.
664 */
665 ASSERT(!(region->skr_mode & SKR_MODE_NOMAGAZINES));
666 }
667
668 if (cflags & SKMEM_CR_DYNAMIC) {
669 /*
670 * Enable per-CPU cache magazine resizing.
671 */
672 skm->skm_mode |= SKM_MODE_DYNAMIC;
673 }
674
675 /* region stays around after defunct? */
676 if (region->skr_mode & SKR_MODE_NOREDIRECT) {
677 skm->skm_mode |= SKM_MODE_NOREDIRECT;
678 }
679
680 if (cflags & SKMEM_CR_BATCH) {
681 /*
682 * Batch alloc/free involves storing the next object
683 * pointer at the beginning of each object; this is
684 * okay for kernel-only regions, but not those that
685 * are mappable to user space (we can't leak kernel
686 * addresses).
687 */
688 _CASSERT(offsetof(struct skmem_obj, mo_next) == 0);
689 VERIFY(!(region->skr_mode & SKR_MODE_MMAPOK));
690
691 /* batching is currently not supported on pseudo regions */
692 VERIFY(!(region->skr_mode & SKR_MODE_PSEUDO));
693
694 /* validate object size */
695 VERIFY(region->skr_c_obj_size >= sizeof(struct skmem_obj));
696
697 skm->skm_mode |= SKM_MODE_BATCH;
698 }
699
700 uuid_generate_random(skm->skm_uuid);
701 (void) snprintf(skm->skm_name, sizeof(skm->skm_name),
702 "%s.%s", SKMEM_CACHE_PREFIX, name);
703 skm->skm_bufsize = bufsize;
704 skm->skm_bufalign = bufalign;
705 skm->skm_objalign = objalign;
706 skm->skm_ctor = ctor;
707 skm->skm_dtor = dtor;
708 skm->skm_reclaim = reclaim;
709 skm->skm_private = private;
710 skm->skm_slabsize = segsize;
711
712 skm->skm_region = region;
713 /* callee holds reference */
714 skmem_region_slab_config(region, skm, true);
715 objsize = region->skr_c_obj_size;
716 skm->skm_objsize = objsize;
717
718 if (pseudo) {
719 /*
720 * Release reference from skmem_region_create()
721 * since skm->skm_region holds one now.
722 */
723 ASSERT(region->skr_mode & SKR_MODE_PSEUDO);
724 skmem_region_release(region);
725
726 skm->skm_mode |= SKM_MODE_PSEUDO;
727
728 skm->skm_slab_alloc = skmem_slab_alloc_pseudo_locked;
729 skm->skm_slab_free = skmem_slab_free_pseudo_locked;
730 } else {
731 skm->skm_slab_alloc = skmem_slab_alloc_locked;
732 skm->skm_slab_free = skmem_slab_free_locked;
733
734 /* auditing was requested? (normal regions only) */
735 if (skmem_debug & SKMEM_DEBUG_AUDIT) {
736 ASSERT(bc_size == sizeof(struct skmem_bufctl_audit));
737 skm->skm_mode |= SKM_MODE_AUDIT;
738 }
739 }
740
741 /*
742 * Clear upon free (to slab layer) as long as the region is
743 * not marked as read-only for kernel, and if the chunk size
744 * is within the threshold or if the caller had requested it.
745 */
746 if (!(region->skr_mode & SKR_MODE_KREADONLY)) {
747 if (skm->skm_objsize <= skmem_clear_min ||
748 (cflags & SKMEM_CR_CLEARONFREE)) {
749 skm->skm_mode |= SKM_MODE_CLEARONFREE;
750 }
751 }
752
753 chunksize = bufsize;
754 if (bufalign >= SKMEM_CACHE_ALIGN) {
755 chunksize = P2ROUNDUP(chunksize, SKMEM_CACHE_ALIGN);
756 }
757
758 chunksize = P2ROUNDUP(chunksize, bufalign);
759 if (chunksize > objsize) {
760 panic("%s: (bufsize %lu, chunksize %lu) > objsize %lu",
761 __func__, bufsize, chunksize, objsize);
762 /* NOTREACHED */
763 __builtin_unreachable();
764 }
765 ASSERT(chunksize != 0);
766 skm->skm_chunksize = chunksize;
767
768 lck_mtx_init(&skm->skm_sl_lock, &skmem_sl_lock_grp, &skmem_lock_attr);
769 TAILQ_INIT(&skm->skm_sl_partial_list);
770 TAILQ_INIT(&skm->skm_sl_empty_list);
771
772 /* allocated-address hash table */
773 skm->skm_hash_initial = SKMEM_CACHE_HASH_INITIAL;
774 skm->skm_hash_limit = SKMEM_CACHE_HASH_LIMIT;
775 skm->skm_hash_table = sk_alloc_type_array(struct skmem_bufctl_bkt,
776 skm->skm_hash_initial, Z_WAITOK | Z_NOFAIL, skmem_tag_bufctl_hash);
777
778 skm->skm_hash_mask = (skm->skm_hash_initial - 1);
779 skm->skm_hash_shift = flsll(chunksize) - 1;
780
781 for (i = 0; i < (skm->skm_hash_mask + 1); i++) {
782 SLIST_INIT(&skm->skm_hash_table[i].bcb_head);
783 }
784
785 lck_mtx_init(&skm->skm_dp_lock, &skmem_dp_lock_grp, &skmem_lock_attr);
786
787 /* find a suitable magazine type for this chunk size */
788 for (mtp = skmem_magtype; chunksize <= mtp->mt_minbuf; mtp++) {
789 continue;
790 }
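
	/*
	 * Illustrative example of the selection above: on LP64, a chunk
	 * size of 100 bytes skips the mt_minbuf == 128 row (100 <= 128)
	 * and stops at the mt_minbuf == 96 row, i.e. mt_magsize == 30;
	 * larger chunks settle on progressively smaller magazines.
	 */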
791
792 skm->skm_magtype = mtp;
793 if (!(skm->skm_mode & SKM_MODE_NOMAGAZINES)) {
794 skm->skm_cpu_mag_size = skm->skm_magtype->mt_magsize;
795 }
796
797 /*
798 * Initialize the CPU layer. Each per-CPU structure is aligned
799 * on the CPU cache line boundary to prevent false sharing.
800 */
801 lck_mtx_init(&skm->skm_rs_lock, &skmem_cpu_lock_grp, &skmem_lock_attr);
802 for (cpuid = 0; cpuid < ncpu; cpuid++) {
803 struct skmem_cpu_cache *ccp = &skm->skm_cpu_cache[cpuid];
804
805 VERIFY(IS_P2ALIGNED(ccp, CHANNEL_CACHE_ALIGN_MAX));
806 lck_mtx_init(&ccp->cp_lock, &skmem_cpu_lock_grp,
807 &skmem_lock_attr);
808 ccp->cp_rounds = -1;
809 ccp->cp_prounds = -1;
810 }
811
812 SKMEM_CACHE_LOCK();
813 TAILQ_INSERT_TAIL(&skmem_cache_head, skm, skm_link);
814 SKMEM_CACHE_UNLOCK();
815
816 SK_DF(SK_VERB_MEM_CACHE, "\"%s\": skm 0x%llx mode 0x%b",
817 skm->skm_name, SK_KVA(skm), skm->skm_mode, SKM_MODE_BITS);
818 SK_DF(SK_VERB_MEM_CACHE,
819 " bufsz %u bufalign %u chunksz %u objsz %u slabsz %u",
820 (uint32_t)skm->skm_bufsize, (uint32_t)skm->skm_bufalign,
821 (uint32_t)skm->skm_chunksize, (uint32_t)skm->skm_objsize,
822 (uint32_t)skm->skm_slabsize);
823
824 if (skmem_cache_ready) {
825 skmem_cache_magazine_enable(skm, 0);
826 }
827
828 return skm;
829 }
830
831 /*
832 * Destroy a cache.
833 */
834 void
835 skmem_cache_destroy(struct skmem_cache *skm)
836 {
837 uint32_t cpuid;
838
839 SKMEM_CACHE_LOCK();
840 TAILQ_REMOVE(&skmem_cache_head, skm, skm_link);
841 SKMEM_CACHE_UNLOCK();
842
843 ASSERT(skm->skm_rs_busy == 0);
844 ASSERT(skm->skm_rs_want == 0);
845
846 /* purge all cached objects for this cache */
847 skmem_cache_magazine_purge(skm);
848
849 /*
850 * Panic if we detect there are unfreed objects; the caller
851 * destroying this cache is responsible for ensuring that all
852 * allocated objects have been freed prior to getting here.
853 */
854 SKM_SLAB_LOCK(skm);
855 if (skm->skm_sl_bufinuse != 0) {
856 panic("%s: '%s' (%p) not empty (%llu unfreed)", __func__,
857 skm->skm_name, (void *)skm, skm->skm_sl_bufinuse);
858 /* NOTREACHED */
859 __builtin_unreachable();
860 }
861 ASSERT(TAILQ_EMPTY(&skm->skm_sl_partial_list));
862 ASSERT(skm->skm_sl_partial == 0);
863 ASSERT(TAILQ_EMPTY(&skm->skm_sl_empty_list));
864 ASSERT(skm->skm_sl_empty == 0);
865 skm->skm_reclaim = NULL;
866 skm->skm_ctor = NULL;
867 skm->skm_dtor = NULL;
868 SKM_SLAB_UNLOCK(skm);
869
870 if (skm->skm_hash_table != NULL) {
871 #if (DEBUG || DEVELOPMENT)
872 for (uint32_t i = 0; i < (skm->skm_hash_mask + 1); i++) {
873 ASSERT(SLIST_EMPTY(&skm->skm_hash_table[i].bcb_head));
874 }
875 #endif /* DEBUG || DEVELOPMENT */
876
877 sk_free_type_array(struct skmem_bufctl_bkt,
878 skm->skm_hash_mask + 1, skm->skm_hash_table);
879 skm->skm_hash_table = NULL;
880 }
881
882 for (cpuid = 0; cpuid < ncpu; cpuid++) {
883 lck_mtx_destroy(&skm->skm_cpu_cache[cpuid].cp_lock,
884 &skmem_cpu_lock_grp);
885 }
886 lck_mtx_destroy(&skm->skm_rs_lock, &skmem_cpu_lock_grp);
887 lck_mtx_destroy(&skm->skm_dp_lock, &skmem_dp_lock_grp);
888 lck_mtx_destroy(&skm->skm_sl_lock, &skmem_sl_lock_grp);
889
890 SK_DF(SK_VERB_MEM_CACHE, "\"%s\": skm 0x%llx",
891 skm->skm_name, SK_KVA(skm));
892
893 /* callee releases reference */
894 skmem_region_slab_config(skm->skm_region, skm, false);
895 skm->skm_region = NULL;
896
897 #if KASAN
898 /* get the original address since we're about to free it */
899 void **pbuf = (void **)((intptr_t)skm - sizeof(void *));
900 skm = *pbuf;
901 #endif /* KASAN */
902
903 zfree(skm_zone, skm);
904 }
905
906 /*
907 * Create a slab.
908 */
909 static struct skmem_slab *
910 skmem_slab_create(struct skmem_cache *skm, uint32_t skmflag)
911 {
912 struct skmem_region *skr = skm->skm_region;
913 uint32_t objsize, chunks;
914 size_t slabsize = skm->skm_slabsize;
915 struct skmem_slab *sl;
916 struct sksegment *sg, *sgm;
917 char *buf, *bufm, *slab, *slabm;
918
919 /*
920 * Allocate a segment (a slab at our layer) from the region.
921 */
922 slab = skmem_region_alloc(skr, (void **)&slabm, &sg, &sgm, skmflag);
923 if (slab == NULL) {
924 goto rg_alloc_failure;
925 }
926
927 if ((sl = skmem_cache_alloc(skmem_slab_cache, SKMEM_SLEEP)) == NULL) {
928 goto slab_alloc_failure;
929 }
930
931 ASSERT(sg != NULL);
932 ASSERT(sgm == NULL || sgm->sg_index == sg->sg_index);
933
934 bzero(sl, sizeof(*sl));
935 sl->sl_cache = skm;
936 sl->sl_base = buf = slab;
937 sl->sl_basem = bufm = slabm;
938 ASSERT(skr->skr_c_obj_size <= UINT32_MAX);
939 objsize = (uint32_t)skr->skr_c_obj_size;
940 ASSERT(skm->skm_objsize == objsize);
941 ASSERT((slabsize / objsize) <= UINT32_MAX);
942 sl->sl_chunks = chunks = (uint32_t)(slabsize / objsize);
943 sl->sl_seg = sg;
944 sl->sl_segm = sgm;
945
946 /*
947 * Create one or more buffer control structures for the slab,
948 * each one tracking a chunk of raw object from the segment,
949 * and insert these into the slab's list of buffer controls.
950 */
951 ASSERT(chunks > 0);
952 while (chunks != 0) {
953 struct skmem_bufctl *bc;
954
955 bc = skmem_cache_alloc(skmem_bufctl_cache, SKMEM_SLEEP);
956 if (bc == NULL) {
957 goto bufctl_alloc_failure;
958 }
959
960 bzero(bc, bc_size);
961 bc->bc_addr = buf;
962 bc->bc_addrm = bufm;
963 bc->bc_slab = sl;
964 bc->bc_idx = (sl->sl_chunks - chunks);
965 if (skr->skr_mode & SKR_MODE_SHAREOK) {
966 bc->bc_flags |= SKMEM_BUFCTL_SHAREOK;
967 }
968 SLIST_INSERT_HEAD(&sl->sl_head, bc, bc_link);
969 bc->bc_lim = objsize;
970 buf += objsize;
971 if (bufm != NULL) {
972 bufm += objsize;
973 }
974 --chunks;
975 }
976
977 SK_DF(SK_VERB_MEM_CACHE, "skm 0x%llx sl 0x%llx",
978 SK_KVA(skm), SK_KVA(sl));
979 SK_DF(SK_VERB_MEM_CACHE, " [%u] [0x%llx-0x%llx)", sl->sl_seg->sg_index,
980 SK_KVA(slab), SK_KVA(slab + objsize));
981
982 return sl;
983
984 bufctl_alloc_failure:
985 skmem_slab_destroy(skm, sl);
986
987 slab_alloc_failure:
988 skmem_region_free(skr, slab, slabm);
989
990 rg_alloc_failure:
991 atomic_add_64(&skm->skm_sl_alloc_fail, 1);
992
993 return NULL;
994 }
995
996 /*
997 * Destroy a slab.
998 */
999 static void
1000 skmem_slab_destroy(struct skmem_cache *skm, struct skmem_slab *sl)
1001 {
1002 struct skmem_bufctl *bc, *tbc;
1003 void *slab = sl->sl_base;
1004 void *slabm = sl->sl_basem;
1005
1006 ASSERT(sl->sl_refcnt == 0);
1007
1008 SK_DF(SK_VERB_MEM_CACHE, "skm 0x%llx sl 0x%llx",
1009 SK_KVA(skm), SK_KVA(sl));
1010 SK_DF(SK_VERB_MEM_CACHE, " [%u] [0x%llx-0x%llx)", sl->sl_seg->sg_index,
1011 SK_KVA(slab), SK_KVA((uintptr_t)slab + skm->skm_objsize));
1012
1013 /*
1014 * Go through the slab's list of buffer controls and free
1015 * them, and then free the slab itself back to its cache.
1016 */
1017 SLIST_FOREACH_SAFE(bc, &sl->sl_head, bc_link, tbc) {
1018 SLIST_REMOVE(&sl->sl_head, bc, skmem_bufctl, bc_link);
1019 skmem_cache_free(skmem_bufctl_cache, bc);
1020 }
1021 skmem_cache_free(skmem_slab_cache, sl);
1022
1023 /* and finally free the segment back to the backing region */
1024 skmem_region_free(skm->skm_region, slab, slabm);
1025 }
1026
1027 /*
1028 * Allocate a raw object from the (locked) slab layer. Normal region variant.
1029 */
1030 static int
1031 skmem_slab_alloc_locked(struct skmem_cache *skm, struct skmem_obj_info *oi,
1032 struct skmem_obj_info *oim, uint32_t skmflag)
1033 {
1034 struct skmem_bufctl_bkt *bcb;
1035 struct skmem_bufctl *bc;
1036 struct skmem_slab *sl;
1037 uint32_t retries = 0;
1038 uint64_t boff_total = 0; /* in usec */
1039 uint64_t boff = 0; /* in msec */
1040 boolean_t new_slab;
1041 void *buf;
1042
1043 /* this flag is not for the caller to set */
1044 VERIFY(!(skmflag & SKMEM_FAILOK));
1045
1046 /*
1047 * A slab is either in a partially-allocated list (at least it has
1048 * a free object available), or is in the empty list (everything
1049 * has been allocated.) If we can't find a partially-allocated
1050 * slab, then we need to allocate a slab (segment) from the region.
1051 */
1052 again:
1053 SKM_SLAB_LOCK_ASSERT_HELD(skm);
1054 sl = TAILQ_FIRST(&skm->skm_sl_partial_list);
1055 if (sl == NULL) {
1056 uint32_t flags = skmflag;
1057 boolean_t retry;
1058
1059 ASSERT(skm->skm_sl_partial == 0);
1060 SKM_SLAB_UNLOCK(skm);
1061 if (!(flags & SKMEM_NOSLEEP)) {
1062 /*
1063 * Pick up a random value to start the exponential
1064 * backoff, if this is the first round, or if the
1065 * current value is over the threshold. Otherwise,
1066 * double the backoff value.
1067 */
1068 if (boff == 0 || boff > SKMEM_SLAB_BACKOFF_THRES) {
1069 read_frandom(&boff, sizeof(boff));
1070 boff = (boff % SKMEM_SLAB_BACKOFF_RANDOM) + 1;
1071 ASSERT(boff > 0);
1072 } else if (os_mul_overflow(boff, 2, &boff)) {
1073 panic_plain("\"%s\": boff counter "
1074 "overflows\n", skm->skm_name);
1075 /* NOTREACHED */
1076 __builtin_unreachable();
1077 }
1078 /* add this value (in msec) to the total (in usec) */
1079 if (os_add_overflow(boff_total,
1080 (boff * NSEC_PER_USEC), &boff_total)) {
1081 panic_plain("\"%s\": boff_total counter "
1082 "overflows\n", skm->skm_name);
1083 /* NOTREACHED */
1084 __builtin_unreachable();
1085 }
1086 }
1087 /*
1088 * In the event of a race between multiple threads trying
1089 * to create the last remaining (or the only) slab, let the
1090 * loser(s) attempt to retry after waiting a bit. The winner
1091 * would have inserted the newly-created slab into the list.
1092 */
1093 if (!(flags & SKMEM_NOSLEEP) &&
1094 boff_total <= SKMEM_SLAB_MAX_BACKOFF) {
1095 retry = TRUE;
1096 ++retries;
1097 flags |= SKMEM_FAILOK;
1098 } else {
1099 if (!(flags & SKMEM_NOSLEEP)) {
1100 panic_plain("\"%s\": failed to allocate "
1101 "slab (sleeping mode) after %llu "
1102 "msec, %u retries\n\n%s", skm->skm_name,
1103 (boff_total / NSEC_PER_USEC), retries,
1104 skmem_dump(skm->skm_region));
1105 /* NOTREACHED */
1106 __builtin_unreachable();
1107 }
1108 retry = FALSE;
1109 }
1110
1111 /*
1112 * Create a new slab.
1113 */
1114 if ((sl = skmem_slab_create(skm, flags)) == NULL) {
1115 if (retry) {
1116 SK_ERR("\"%s\": failed to allocate "
1117 "slab (%ssleeping mode): waiting for %llu "
1118 "msec, total %llu msec, %u retries",
1119 skm->skm_name,
1120 (flags & SKMEM_NOSLEEP) ? "non-" : "",
1121 boff, (boff_total / NSEC_PER_USEC), retries);
1122 VERIFY(boff > 0 && ((uint32_t)boff <=
1123 (SKMEM_SLAB_BACKOFF_THRES * 2)));
1124 delay((uint32_t)boff * NSEC_PER_USEC);
1125 SKM_SLAB_LOCK(skm);
1126 goto again;
1127 } else {
1128 SK_RDERR(4, "\"%s\": failed to allocate slab "
1129 "(%ssleeping mode)", skm->skm_name,
1130 (flags & SKMEM_NOSLEEP) ? "non-" : "");
1131 SKM_SLAB_LOCK(skm);
1132 }
1133 return ENOMEM;
1134 }
1135
1136 SKM_SLAB_LOCK(skm);
1137 skm->skm_sl_create++;
1138 if ((skm->skm_sl_bufinuse += sl->sl_chunks) >
1139 skm->skm_sl_bufmax) {
1140 skm->skm_sl_bufmax = skm->skm_sl_bufinuse;
1141 }
1142 }
1143 skm->skm_sl_alloc++;
1144
1145 new_slab = (sl->sl_refcnt == 0);
1146 ASSERT(new_slab || SKMEM_SLAB_IS_PARTIAL(sl));
1147
1148 sl->sl_refcnt++;
1149 ASSERT(sl->sl_refcnt <= sl->sl_chunks);
1150
1151 /*
1152 * We either have a new slab, or a partially-allocated one.
1153 * Remove a buffer control from the slab, and insert it to
1154 * the allocated-address hash chain.
1155 */
1156 bc = SLIST_FIRST(&sl->sl_head);
1157 ASSERT(bc != NULL);
1158 SLIST_REMOVE(&sl->sl_head, bc, skmem_bufctl, bc_link);
1159
1160 /* sanity check */
1161 VERIFY(bc->bc_usecnt == 0);
1162
1163 /*
1164 * Also store the master object's region info for the caller.
1165 */
1166 bzero(oi, sizeof(*oi));
1167 SKMEM_OBJ_ADDR(oi) = buf = bc->bc_addr;
1168 SKMEM_OBJ_BUFCTL(oi) = bc; /* master only; NULL for slave */
1169 ASSERT(skm->skm_objsize <= UINT32_MAX);
1170 SKMEM_OBJ_SIZE(oi) = (uint32_t)skm->skm_objsize;
1171 SKMEM_OBJ_IDX_REG(oi) =
1172 ((sl->sl_seg->sg_index * sl->sl_chunks) + bc->bc_idx);
1173 SKMEM_OBJ_IDX_SEG(oi) = bc->bc_idx;
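	/*
	 * E.g. (illustrative numbers): chunk index 5 of the slab backed by
	 * segment 3, with 64 chunks per slab, gets region-wide index
	 * (3 * 64) + 5 = 197 above, while its within-segment index is 5.
	 */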
1174 /*
1175 * And for slave object.
1176 */
1177 if (oim != NULL) {
1178 bzero(oim, sizeof(*oim));
1179 if (bc->bc_addrm != NULL) {
1180 SKMEM_OBJ_ADDR(oim) = bc->bc_addrm;
1181 SKMEM_OBJ_SIZE(oim) = SKMEM_OBJ_SIZE(oi);
1182 SKMEM_OBJ_IDX_REG(oim) = SKMEM_OBJ_IDX_REG(oi);
1183 SKMEM_OBJ_IDX_SEG(oim) = SKMEM_OBJ_IDX_SEG(oi);
1184 }
1185 }
1186
1187 if (skm->skm_mode & SKM_MODE_BATCH) {
1188 ((struct skmem_obj *)buf)->mo_next = NULL;
1189 }
1190
1191 /* insert to allocated-address hash chain */
1192 bcb = SKMEM_CACHE_HASH(skm, buf);
1193 SLIST_INSERT_HEAD(&bcb->bcb_head, bc, bc_link);
1194
1195 if (SLIST_EMPTY(&sl->sl_head)) {
1196 /*
1197 * If that was the last buffer control from this slab,
1198 * insert the slab into the empty list. If it was in
1199 * the partially-allocated list, then remove the slab
1200 * from there as well.
1201 */
1202 ASSERT(sl->sl_refcnt == sl->sl_chunks);
1203 if (new_slab) {
1204 ASSERT(sl->sl_chunks == 1);
1205 } else {
1206 ASSERT(sl->sl_chunks > 1);
1207 ASSERT(skm->skm_sl_partial > 0);
1208 skm->skm_sl_partial--;
1209 TAILQ_REMOVE(&skm->skm_sl_partial_list, sl, sl_link);
1210 }
1211 skm->skm_sl_empty++;
1212 ASSERT(skm->skm_sl_empty != 0);
1213 TAILQ_INSERT_HEAD(&skm->skm_sl_empty_list, sl, sl_link);
1214 } else {
1215 /*
1216 * The slab is not empty; if it was newly allocated
1217 * above, then it's not in the partially-allocated
1218 * list and so we insert it there.
1219 */
1220 ASSERT(SKMEM_SLAB_IS_PARTIAL(sl));
1221 if (new_slab) {
1222 skm->skm_sl_partial++;
1223 ASSERT(skm->skm_sl_partial != 0);
1224 TAILQ_INSERT_HEAD(&skm->skm_sl_partial_list,
1225 sl, sl_link);
1226 }
1227 }
1228
1229 /* if auditing is enabled, record this transaction */
1230 if (__improbable((skm->skm_mode & SKM_MODE_AUDIT) != 0)) {
1231 skmem_audit_bufctl(bc);
1232 }
1233
1234 return 0;
1235 }
1236
1237 /*
1238 * Allocate a raw object from the (locked) slab layer. Pseudo region variant.
1239 */
1240 static int
1241 skmem_slab_alloc_pseudo_locked(struct skmem_cache *skm,
1242 struct skmem_obj_info *oi, struct skmem_obj_info *oim, uint32_t skmflag)
1243 {
1244 zalloc_flags_t zflags = (skmflag & SKMEM_NOSLEEP) ? Z_NOWAIT : Z_WAITOK;
1245 struct skmem_region *skr = skm->skm_region;
1246 void *obj, *buf;
1247
1248 /* this flag is not for the caller to set */
1249 VERIFY(!(skmflag & SKMEM_FAILOK));
1250
1251 SKM_SLAB_LOCK_ASSERT_HELD(skm);
1252
1253 ASSERT(skr->skr_reg == NULL && skr->skr_zreg != NULL);
1254 /* mirrored region is not applicable */
1255 ASSERT(!(skr->skr_mode & SKR_MODE_MIRRORED));
1256 /* batching is not yet supported */
1257 ASSERT(!(skm->skm_mode & SKM_MODE_BATCH));
1258
1259 if ((obj = zalloc_flags(skr->skr_zreg, zflags | Z_ZERO)) == NULL) {
1260 atomic_add_64(&skm->skm_sl_alloc_fail, 1);
1261 return ENOMEM;
1262 }
1263
1264 #if KASAN
1265 /*
1266 * Perform some fix-ups since the zone element isn't guaranteed
1267 * to be on the aligned boundary. The effective object size
1268 * has been adjusted accordingly by skmem_region_create() earlier
1269 * at cache creation time.
1270 *
1271 * 'buf' gets the aligned address for this object.
1272 */
1273 buf = (void *)P2ROUNDUP((intptr_t)obj + sizeof(u_int64_t),
1274 skm->skm_bufalign);
1275
1276 /*
1277 * Wind back a pointer size from the aligned address and
1278 * save the original address so we can free it later.
1279 */
1280 void **pbuf = (void **)((intptr_t)buf - sizeof(void *));
1281 *pbuf = obj;
1282
1283 VERIFY(((intptr_t)buf + skm->skm_bufsize) <=
1284 ((intptr_t)obj + skm->skm_objsize));
1285 #else /* !KASAN */
1286 /*
1287 * We expect that the zone allocator would allocate elements
1288 * rounded up to the requested alignment based on the effective
1289 * object size computed in skmem_region_create() earlier, and
1290 * 'buf' is therefore the element address itself.
1291 */
1292 buf = obj;
1293 #endif /* !KASAN */
1294
1295 /* make sure the object is aligned */
1296 VERIFY(IS_P2ALIGNED(buf, skm->skm_bufalign));
1297
1298 /*
1299 * Return the object's info to the caller.
1300 */
1301 bzero(oi, sizeof(*oi));
1302 SKMEM_OBJ_ADDR(oi) = buf;
1303 ASSERT(skm->skm_objsize <= UINT32_MAX);
1304 SKMEM_OBJ_SIZE(oi) = (uint32_t)skm->skm_objsize;
1305 if (oim != NULL) {
1306 bzero(oim, sizeof(*oim));
1307 }
1308
1309 skm->skm_sl_alloc++;
1310 skm->skm_sl_bufinuse++;
1311 if (skm->skm_sl_bufinuse > skm->skm_sl_bufmax) {
1312 skm->skm_sl_bufmax = skm->skm_sl_bufinuse;
1313 }
1314
1315 return 0;
1316 }
1317
1318 /*
1319 * Allocate a raw object from the slab layer.
1320 */
1321 static int
1322 skmem_slab_alloc(struct skmem_cache *skm, struct skmem_obj_info *oi,
1323 struct skmem_obj_info *oim, uint32_t skmflag)
1324 {
1325 int err;
1326
1327 SKM_SLAB_LOCK(skm);
1328 err = skm->skm_slab_alloc(skm, oi, oim, skmflag);
1329 SKM_SLAB_UNLOCK(skm);
1330
1331 return err;
1332 }
1333
1334 /*
1335 * Allocate raw object(s) from the slab layer.
1336 */
1337 static uint32_t
1338 skmem_slab_batch_alloc(struct skmem_cache *skm, struct skmem_obj **list,
1339 uint32_t num, uint32_t skmflag)
1340 {
1341 uint32_t need = num;
1342
1343 ASSERT(list != NULL && (skm->skm_mode & SKM_MODE_BATCH));
1344 *list = NULL;
1345
1346 SKM_SLAB_LOCK(skm);
1347 for (;;) {
1348 struct skmem_obj_info oi, oim;
1349
1350 /*
1351 * Get a single raw object from the slab layer.
1352 */
1353 if (skm->skm_slab_alloc(skm, &oi, &oim, skmflag) != 0) {
1354 break;
1355 }
1356
1357 *list = SKMEM_OBJ_ADDR(&oi);
1358 ASSERT((*list)->mo_next == NULL);
1359 /* store these inside the object itself */
1360 (*list)->mo_info = oi;
1361 (*list)->mo_minfo = oim;
1362 list = &(*list)->mo_next;
1363
1364 ASSERT(need != 0);
1365 if (--need == 0) {
1366 break;
1367 }
1368 }
1369 SKM_SLAB_UNLOCK(skm);
1370
1371 return num - need;
1372 }
1373
1374 /*
1375 * Free a raw object to the (locked) slab layer. Normal region variant.
1376 */
1377 static void
1378 skmem_slab_free_locked(struct skmem_cache *skm, void *buf)
1379 {
1380 struct skmem_bufctl *bc, *tbc;
1381 struct skmem_bufctl_bkt *bcb;
1382 struct skmem_slab *sl = NULL;
1383
1384 SKM_SLAB_LOCK_ASSERT_HELD(skm);
1385 ASSERT(buf != NULL);
1386 /* caller is expected to clear mo_next */
1387 ASSERT(!(skm->skm_mode & SKM_MODE_BATCH) ||
1388 ((struct skmem_obj *)buf)->mo_next == NULL);
1389
1390 /*
1391 * Search the hash chain to find a matching buffer control for the
1392 * given object address. If found, remove the buffer control from
1393 * the hash chain and insert it into the freelist. Otherwise, we
1394 * panic since the caller has given us a bogus address.
1395 */
1396 skm->skm_sl_free++;
1397 bcb = SKMEM_CACHE_HASH(skm, buf);
1398 SLIST_FOREACH_SAFE(bc, &bcb->bcb_head, bc_link, tbc) {
1399 if (bc->bc_addr == buf) {
1400 SLIST_REMOVE(&bcb->bcb_head, bc, skmem_bufctl, bc_link);
1401 sl = bc->bc_slab;
1402 break;
1403 }
1404 }
1405
1406 if (bc == NULL) {
1407 panic("%s: attempt to free invalid or already-freed obj %p "
1408 "on skm %p", __func__, buf, skm);
1409 /* NOTREACHED */
1410 __builtin_unreachable();
1411 }
1412 ASSERT(sl != NULL && sl->sl_cache == skm);
1413 VERIFY(SKMEM_SLAB_MEMBER(sl, buf));
1414
1415 /* make sure this object is not currently in use by another object */
1416 VERIFY(bc->bc_usecnt == 0);
1417
1418 /* if auditing is enabled, record this transaction */
1419 if (__improbable((skm->skm_mode & SKM_MODE_AUDIT) != 0)) {
1420 skmem_audit_bufctl(bc);
1421 }
1422
1423 /* if clear on free is requested, zero out the object */
1424 if (skm->skm_mode & SKM_MODE_CLEARONFREE) {
1425 bzero(buf, skm->skm_objsize);
1426 }
1427
1428 /* insert the buffer control to the slab's freelist */
1429 SLIST_INSERT_HEAD(&sl->sl_head, bc, bc_link);
1430
1431 ASSERT(sl->sl_refcnt >= 1);
1432 if (--sl->sl_refcnt == 0) {
1433 /*
1434 * If this was the last outstanding object for the slab,
1435 * remove the slab from the partially-allocated or empty
1436 * list, and destroy the slab (segment) back to the region.
1437 */
1438 if (sl->sl_chunks == 1) {
1439 ASSERT(skm->skm_sl_empty > 0);
1440 skm->skm_sl_empty--;
1441 TAILQ_REMOVE(&skm->skm_sl_empty_list, sl, sl_link);
1442 } else {
1443 ASSERT(skm->skm_sl_partial > 0);
1444 skm->skm_sl_partial--;
1445 TAILQ_REMOVE(&skm->skm_sl_partial_list, sl, sl_link);
1446 }
1447 ASSERT((int64_t)(skm->skm_sl_bufinuse - sl->sl_chunks) >= 0);
1448 skm->skm_sl_bufinuse -= sl->sl_chunks;
1449 skm->skm_sl_destroy++;
1450 SKM_SLAB_UNLOCK(skm);
1451 skmem_slab_destroy(skm, sl);
1452 SKM_SLAB_LOCK(skm);
1453 return;
1454 }
1455
1456 ASSERT(bc == SLIST_FIRST(&sl->sl_head));
1457 if (SLIST_NEXT(bc, bc_link) == NULL) {
1458 /*
1459 * If this is the first (potentially amongst many) object
1460 * that's returned to the slab, remove the slab from the
1461 * empty list and insert to end of the partially-allocated
1462 * list. This should help avoid thrashing the partial slab
1463 * since we avoid disturbing what's already at the front.
1464 */
1465 ASSERT(sl->sl_refcnt == (sl->sl_chunks - 1));
1466 ASSERT(sl->sl_chunks > 1);
1467 ASSERT(skm->skm_sl_empty > 0);
1468 skm->skm_sl_empty--;
1469 TAILQ_REMOVE(&skm->skm_sl_empty_list, sl, sl_link);
1470 skm->skm_sl_partial++;
1471 ASSERT(skm->skm_sl_partial != 0);
1472 TAILQ_INSERT_TAIL(&skm->skm_sl_partial_list, sl, sl_link);
1473 }
1474 }
1475
1476 /*
1477 * Free a raw object to the (locked) slab layer. Pseudo region variant.
1478 */
1479 static void
1480 skmem_slab_free_pseudo_locked(struct skmem_cache *skm, void *buf)
1481 {
1482 struct skmem_region *skr = skm->skm_region;
1483 void *obj = buf;
1484
1485 ASSERT(skr->skr_reg == NULL && skr->skr_zreg != NULL);
1486
1487 SKM_SLAB_LOCK_ASSERT_HELD(skm);
1488
1489 VERIFY(IS_P2ALIGNED(obj, skm->skm_bufalign));
1490
1491 #if KASAN
1492 /*
1493 * Since we stuffed the original zone element address before
1494 * the buffer address in KASAN mode, get it back since we're
1495 * about to free it.
1496 */
1497 void **pbuf = (void **)((intptr_t)obj - sizeof(void *));
1498
1499 VERIFY(((intptr_t)obj + skm->skm_bufsize) <=
1500 ((intptr_t)*pbuf + skm->skm_objsize));
1501
1502 obj = *pbuf;
1503 #endif /* KASAN */
1504
1505 /* free it to zone */
1506 zfree(skr->skr_zreg, obj);
1507
1508 skm->skm_sl_free++;
1509 ASSERT(skm->skm_sl_bufinuse > 0);
1510 skm->skm_sl_bufinuse--;
1511 }
1512
1513 /*
1514 * Free a raw object to the slab layer.
1515 */
1516 static void
1517 skmem_slab_free(struct skmem_cache *skm, void *buf)
1518 {
1519 if (skm->skm_mode & SKM_MODE_BATCH) {
1520 ((struct skmem_obj *)buf)->mo_next = NULL;
1521 }
1522
1523 SKM_SLAB_LOCK(skm);
1524 skm->skm_slab_free(skm, buf);
1525 SKM_SLAB_UNLOCK(skm);
1526 }
1527
1528 /*
1529 * Free raw object(s) to the slab layer.
1530 */
1531 static void
1532 skmem_slab_batch_free(struct skmem_cache *skm, struct skmem_obj *list)
1533 {
1534 struct skmem_obj *listn;
1535
1536 ASSERT(list != NULL && (skm->skm_mode & SKM_MODE_BATCH));
1537
1538 SKM_SLAB_LOCK(skm);
1539 for (;;) {
1540 listn = list->mo_next;
1541 list->mo_next = NULL;
1542
1543 /*
1544 * Free a single object to the slab layer.
1545 */
1546 skm->skm_slab_free(skm, (void *)list);
1547
1548 /* if no more objects to free, we're done */
1549 if ((list = listn) == NULL) {
1550 break;
1551 }
1552 }
1553 SKM_SLAB_UNLOCK(skm);
1554 }
1555
1556 /*
1557 * Return the object's region info.
1558 */
1559 void
1560 skmem_cache_get_obj_info(struct skmem_cache *skm, void *buf,
1561 struct skmem_obj_info *oi, struct skmem_obj_info *oim)
1562 {
1563 struct skmem_bufctl_bkt *bcb;
1564 struct skmem_bufctl *bc;
1565 struct skmem_slab *sl;
1566
1567 /*
1568 * Search the hash chain to find a matching buffer control for the
1569 * given object address. If not found, panic since the caller has
1570 * given us a bogus address.
1571 */
1572 SKM_SLAB_LOCK(skm);
1573 bcb = SKMEM_CACHE_HASH(skm, buf);
1574 SLIST_FOREACH(bc, &bcb->bcb_head, bc_link) {
1575 if (bc->bc_addr == buf) {
1576 break;
1577 }
1578 }
1579
1580 if (__improbable(bc == NULL)) {
1581 panic("%s: %s failed to get object info for %p",
1582 __func__, skm->skm_name, buf);
1583 /* NOTREACHED */
1584 __builtin_unreachable();
1585 }
1586
1587 /*
1588 * Return the master object's info to the caller.
1589 */
1590 sl = bc->bc_slab;
1591 SKMEM_OBJ_ADDR(oi) = bc->bc_addr;
1592 SKMEM_OBJ_BUFCTL(oi) = bc; /* master only; NULL for slave */
1593 ASSERT(skm->skm_objsize <= UINT32_MAX);
1594 SKMEM_OBJ_SIZE(oi) = (uint32_t)skm->skm_objsize;
1595 SKMEM_OBJ_IDX_REG(oi) =
1596 (sl->sl_seg->sg_index * sl->sl_chunks) + bc->bc_idx;
1597 SKMEM_OBJ_IDX_SEG(oi) = bc->bc_idx;
1598 /*
1599 * And for slave object.
1600 */
1601 if (oim != NULL) {
1602 bzero(oim, sizeof(*oim));
1603 if (bc->bc_addrm != NULL) {
1604 SKMEM_OBJ_ADDR(oim) = bc->bc_addrm;
1605 SKMEM_OBJ_SIZE(oim) = oi->oi_size;
1606 SKMEM_OBJ_IDX_REG(oim) = oi->oi_idx_reg;
1607 SKMEM_OBJ_IDX_SEG(oim) = oi->oi_idx_seg;
1608 }
1609 }
1610 SKM_SLAB_UNLOCK(skm);
1611 }
1612
1613 /*
1614 * Magazine constructor.
1615 */
1616 static int
1617 skmem_magazine_ctor(struct skmem_obj_info *oi, struct skmem_obj_info *oim,
1618 void *arg, uint32_t skmflag)
1619 {
1620 #pragma unused(oim, skmflag)
1621 struct skmem_mag *mg = SKMEM_OBJ_ADDR(oi);
1622
1623 ASSERT(oim == NULL);
1624 ASSERT(arg != NULL);
1625
1626 /*
1627 * Store the magazine type (passed in via arg) in the
1628 * magazine object, since we'll need to refer to it at
1629 * magazine destroy time; we can't safely refer to
1630 * skm_magtype then, as the depot lock may not be held.
1631 */
1632 mg->mg_magtype = arg;
1633
1634 return 0;
1635 }
1636
1637 /*
1638 * Destroy a magazine (free each object to the slab layer).
1639 */
1640 static void
1641 skmem_magazine_destroy(struct skmem_cache *skm, struct skmem_mag *mg,
1642 int nrounds)
1643 {
1644 int round;
1645
1646 for (round = 0; round < nrounds; round++) {
1647 void *buf = mg->mg_round[round];
1648 struct skmem_obj *next;
1649
1650 if (skm->skm_mode & SKM_MODE_BATCH) {
1651 next = ((struct skmem_obj *)buf)->mo_next;
1652 ((struct skmem_obj *)buf)->mo_next = NULL;
1653 }
1654
1655 /* deconstruct the object */
1656 if (skm->skm_dtor != NULL) {
1657 skm->skm_dtor(buf, skm->skm_private);
1658 }
1659
1660 /*
1661 * In non-batching mode, each object in the magazine has
1662 * no linkage to its neighbor, so free each object
1663 * individually to the slab layer now.
1664 */
1665 if (!(skm->skm_mode & SKM_MODE_BATCH)) {
1666 skmem_slab_free(skm, buf);
1667 } else {
1668 ((struct skmem_obj *)buf)->mo_next = next;
1669 }
1670 }
1671
1672 /*
1673 * In batching mode, each object is linked to its neighbor at free
1674 * time, so take the bottom-most object and free it to the slab
1675 * layer. Because of the way the list is reversed during free, this
1676 * will bring along the rest of the objects above it.
1677 */
1678 if (nrounds > 0 && (skm->skm_mode & SKM_MODE_BATCH)) {
1679 skmem_slab_batch_free(skm, mg->mg_round[nrounds - 1]);
1680 }
1681
1682 /* free the magazine itself back to cache */
1683 skmem_cache_free(mg->mg_magtype->mt_cache, mg);
1684 }
1685
1686 /*
1687 * Get one or more magazines from the depot.
1688 */
1689 static uint32_t
1690 skmem_depot_batch_alloc(struct skmem_cache *skm, struct skmem_maglist *ml,
1691 uint32_t *count, struct skmem_mag **list, uint32_t num)
1692 {
1693 SLIST_HEAD(, skmem_mag) mg_list = SLIST_HEAD_INITIALIZER(mg_list);
1694 struct skmem_mag *mg;
1695 uint32_t need = num, c = 0;
1696
1697 ASSERT(list != NULL && need > 0);
1698
1699 if (!SKM_DEPOT_LOCK_TRY(skm)) {
1700 /*
1701 * Track the amount of lock contention here; if the contention
1702 * level is high (more than skmem_cache_depot_contention within
1703 * a skmem_cache_update_interval period), treat it as a sign that
1704 * the per-CPU layer is using the wrong magazine type, and that
1705 * the magazines need to be resized.
1706 */
1707 SKM_DEPOT_LOCK(skm);
1708 if (skm->skm_mode & SKM_MODE_DYNAMIC) {
1709 skm->skm_depot_contention++;
1710 }
1711 }
1712
1713 while ((mg = SLIST_FIRST(&ml->ml_list)) != NULL) {
1714 SLIST_REMOVE_HEAD(&ml->ml_list, mg_link);
1715 SLIST_INSERT_HEAD(&mg_list, mg, mg_link);
1716 ASSERT(ml->ml_total != 0);
1717 if (--ml->ml_total < ml->ml_min) {
1718 ml->ml_min = ml->ml_total;
1719 }
1720 c++;
1721 ml->ml_alloc++;
1722 if (--need == 0) {
1723 break;
1724 }
1725 }
1726 *count -= c;
1727
1728 SKM_DEPOT_UNLOCK(skm);
1729
1730 *list = SLIST_FIRST(&mg_list);
1731
1732 return num - need;
1733 }
1734
1735 /*
1736 * Return one or more magazines to the depot.
1737 */
1738 static void
1739 skmem_depot_batch_free(struct skmem_cache *skm, struct skmem_maglist *ml,
1740 uint32_t *count, struct skmem_mag *mg)
1741 {
1742 struct skmem_mag *nmg;
1743 uint32_t c = 0;
1744
1745 SKM_DEPOT_LOCK(skm);
1746 while (mg != NULL) {
1747 nmg = SLIST_NEXT(mg, mg_link);
1748 SLIST_INSERT_HEAD(&ml->ml_list, mg, mg_link);
1749 ml->ml_total++;
1750 c++;
1751 mg = nmg;
1752 }
1753 *count += c;
1754 SKM_DEPOT_UNLOCK(skm);
1755 }
1756
1757 /*
1758 * Update the depot's working state statistics.
1759 */
1760 static void
1761 skmem_depot_ws_update(struct skmem_cache *skm)
1762 {
1763 SKM_DEPOT_LOCK_SPIN(skm);
1764 skm->skm_full.ml_reaplimit = skm->skm_full.ml_min;
1765 skm->skm_full.ml_min = skm->skm_full.ml_total;
1766 skm->skm_empty.ml_reaplimit = skm->skm_empty.ml_min;
1767 skm->skm_empty.ml_min = skm->skm_empty.ml_total;
1768 SKM_DEPOT_UNLOCK(skm);
1769 }
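/*
 * The working set is tracked via the depot's low-water mark:
 * skmem_depot_batch_alloc() drives ml_min down whenever the list
 * shrinks, and on each update pass the previous minimum is promoted
 * to ml_reaplimit while ml_min restarts at ml_total. Magazines
 * counted by MIN(ml_reaplimit, ml_min), i.e. those that sat unused
 * in the depot across recent update intervals, are considered
 * outside the working set and may be reclaimed by
 * skmem_depot_ws_reap(). For example, if the full-magazine list
 * never dropped below 5 over that window, up to 5 full magazines
 * become eligible for reaping.
 */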
1770
1771 /*
1772 * Empty the depot's working state statistics (everything becomes reapable).
1773 */
1774 static void
1775 skmem_depot_ws_zero(struct skmem_cache *skm)
1776 {
1777 SKM_DEPOT_LOCK_SPIN(skm);
1778 if (skm->skm_full.ml_reaplimit != skm->skm_full.ml_total ||
1779 skm->skm_full.ml_min != skm->skm_full.ml_total ||
1780 skm->skm_empty.ml_reaplimit != skm->skm_empty.ml_total ||
1781 skm->skm_empty.ml_min != skm->skm_empty.ml_total) {
1782 skm->skm_full.ml_reaplimit = skm->skm_full.ml_total;
1783 skm->skm_full.ml_min = skm->skm_full.ml_total;
1784 skm->skm_empty.ml_reaplimit = skm->skm_empty.ml_total;
1785 skm->skm_empty.ml_min = skm->skm_empty.ml_total;
1786 skm->skm_depot_ws_zero++;
1787 }
1788 SKM_DEPOT_UNLOCK(skm);
1789 }
1790
1791 /*
1792 * Reap magazines that are outside of the working set.
1793 */
1794 static void
1795 skmem_depot_ws_reap(struct skmem_cache *skm)
1796 {
1797 struct skmem_mag *mg, *nmg;
1798 uint32_t f, e, reap;
1799
1800 reap = f = MIN(skm->skm_full.ml_reaplimit, skm->skm_full.ml_min);
1801 if (reap != 0) {
1802 (void) skmem_depot_batch_alloc(skm, &skm->skm_full,
1803 &skm->skm_depot_full, &mg, reap);
1804 while (mg != NULL) {
1805 nmg = SLIST_NEXT(mg, mg_link);
1806 SLIST_NEXT(mg, mg_link) = NULL;
1807 skmem_magazine_destroy(skm, mg,
1808 mg->mg_magtype->mt_magsize);
1809 mg = nmg;
1810 }
1811 }
1812
1813 reap = e = MIN(skm->skm_empty.ml_reaplimit, skm->skm_empty.ml_min);
1814 if (reap != 0) {
1815 (void) skmem_depot_batch_alloc(skm, &skm->skm_empty,
1816 &skm->skm_depot_empty, &mg, reap);
1817 while (mg != NULL) {
1818 nmg = SLIST_NEXT(mg, mg_link);
1819 SLIST_NEXT(mg, mg_link) = NULL;
1820 skmem_magazine_destroy(skm, mg, 0);
1821 mg = nmg;
1822 }
1823 }
1824
1825 if (f != 0 || e != 0) {
1826 atomic_add_32(&skm->skm_cpu_mag_reap, 1);
1827 }
1828 }
1829
1830 /*
1831 * Performs periodic maintenance on a cache. This is serialized
1832 * through the update thread call, and so we guarantee there's at
1833 * most one update episode in the system at any given time.
1834 */
1835 static void
1836 skmem_cache_update(struct skmem_cache *skm, uint32_t arg)
1837 {
1838 #pragma unused(arg)
1839 boolean_t resize_mag = FALSE;
1840 boolean_t rescale_hash = FALSE;
1841
1842 SKMEM_CACHE_LOCK_ASSERT_HELD();
1843
1844 /* insist that we are executing in the update thread call context */
1845 ASSERT(sk_is_cache_update_protected());
1846
1847 /*
1848 * If the cache has become much larger or smaller than the
1849 * allocated-address hash table, rescale the hash table.
1850 */
1851 SKM_SLAB_LOCK(skm);
1852 if ((skm->skm_sl_bufinuse > (skm->skm_hash_mask << 1) &&
1853 (skm->skm_hash_mask + 1) < skm->skm_hash_limit) ||
1854 (skm->skm_sl_bufinuse < (skm->skm_hash_mask >> 1) &&
1855 skm->skm_hash_mask > skm->skm_hash_initial)) {
1856 rescale_hash = TRUE;
1857 }
1858 SKM_SLAB_UNLOCK(skm);
1859
1860 /*
1861 * Update the working set.
1862 */
1863 skmem_depot_ws_update(skm);
1864
1865 /*
1866 * If the contention count is greater than the threshold during
1867 * the update interval, and if we are not already at the maximum
1868 * magazine size, increase it.
1869 */
1870 SKM_DEPOT_LOCK_SPIN(skm);
1871 if (skm->skm_chunksize < skm->skm_magtype->mt_maxbuf &&
1872 (int)(skm->skm_depot_contention - skm->skm_depot_contention_prev) >
1873 skmem_cache_depot_contention) {
1874 ASSERT(skm->skm_mode & SKM_MODE_DYNAMIC);
1875 resize_mag = TRUE;
1876 }
1877 skm->skm_depot_contention_prev = skm->skm_depot_contention;
1878 SKM_DEPOT_UNLOCK(skm);
1879
1880 if (rescale_hash) {
1881 skmem_cache_hash_rescale(skm);
1882 }
1883
1884 if (resize_mag) {
1885 skmem_cache_magazine_resize(skm);
1886 }
1887 }
1888
1889 /*
1890 * Reload the CPU's magazines with mg and its follower (if any).
1891 */
1892 static void
1893 skmem_cpu_batch_reload(struct skmem_cpu_cache *cp, struct skmem_mag *mg,
1894 int rounds)
1895 {
1896 ASSERT((cp->cp_loaded == NULL && cp->cp_rounds == -1) ||
1897 (cp->cp_loaded && cp->cp_rounds + rounds == cp->cp_magsize));
1898 ASSERT(cp->cp_magsize > 0);
1899
1900 cp->cp_loaded = mg;
1901 cp->cp_rounds = rounds;
1902 if (__probable(SLIST_NEXT(mg, mg_link) != NULL)) {
1903 cp->cp_ploaded = SLIST_NEXT(mg, mg_link);
1904 cp->cp_prounds = rounds;
1905 SLIST_NEXT(mg, mg_link) = NULL;
1906 } else {
1907 ASSERT(SLIST_NEXT(mg, mg_link) == NULL);
1908 cp->cp_ploaded = NULL;
1909 cp->cp_prounds = -1;
1910 }
1911 }
1912
1913 /*
1914 * Reload the CPU's magazine with mg and save the previous one.
1915 */
1916 static void
1917 skmem_cpu_reload(struct skmem_cpu_cache *cp, struct skmem_mag *mg, int rounds)
1918 {
1919 ASSERT((cp->cp_loaded == NULL && cp->cp_rounds == -1) ||
1920 (cp->cp_loaded && cp->cp_rounds + rounds == cp->cp_magsize));
1921 ASSERT(cp->cp_magsize > 0);
1922
1923 cp->cp_ploaded = cp->cp_loaded;
1924 cp->cp_prounds = cp->cp_rounds;
1925 cp->cp_loaded = mg;
1926 cp->cp_rounds = rounds;
1927 }
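/*
 * The assertions above encode the reload invariant: either the CPU
 * layer is empty (cp_loaded == NULL, cp_rounds == -1), or the
 * outgoing and incoming round counts sum to exactly cp_magsize; in
 * practice a reload swaps a full magazine for an empty one (on the
 * free path) or an empty magazine for a full one (on the alloc path).
 */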
1928
1929 /*
1930 * Allocate a constructed object from the cache.
1931 */
1932 void *
1933 skmem_cache_alloc(struct skmem_cache *skm, uint32_t skmflag)
1934 {
1935 struct skmem_obj *buf;
1936
1937 (void) skmem_cache_batch_alloc(skm, &buf, 1, skmflag);
1938 return buf;
1939 }
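/*
 * Illustrative sketch only (hypothetical helper, compiled out):
 * typical single-object use of the cache API for a cache created
 * elsewhere.
 */
#if 0
static void
skmem_cache_alloc_example(struct skmem_cache *skm)
{
	/* allocate one constructed object without blocking */
	void *obj = skmem_cache_alloc(skm, SKMEM_NOSLEEP);
	if (obj == NULL) {
		return;         /* allocation could not be satisfied */
	}

	/* ... use the constructed object ... */

	/* hand it back to the per-CPU/depot/slab layers */
	skmem_cache_free(skm, obj);
}
#endif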
1940
1941 /*
1942 * Allocate constructed object(s) from the cache.
1943 */
1944 uint32_t
1945 skmem_cache_batch_alloc(struct skmem_cache *skm, struct skmem_obj **list,
1946 uint32_t num, uint32_t skmflag)
1947 {
1948 struct skmem_cpu_cache *cp = SKMEM_CPU_CACHE(skm);
1949 struct skmem_obj **top = &(*list);
1950 struct skmem_mag *mg;
1951 uint32_t need = num;
1952
1953 ASSERT(list != NULL);
1954 *list = NULL;
1955
1956 if (need == 0) {
1957 return 0;
1958 }
1959 ASSERT(need == 1 || (skm->skm_mode & SKM_MODE_BATCH));
1960
1961 SKM_CPU_LOCK(cp);
1962 for (;;) {
1963 /*
1964 * If we have an object in the current CPU's loaded
1965 * magazine, return it and we're done.
1966 */
1967 if (cp->cp_rounds > 0) {
1968 int objs = MIN((unsigned int)cp->cp_rounds, need);
1969 /*
1970 * In the SKM_MODE_BATCH case, the objects in the magazine
1971 * are already linked together with the most recently freed
1972 * object at the head of the list; grab as many objects as
1973 * we can. Otherwise we'll grab at most 1 object.
1974 */
1975 *list = cp->cp_loaded->mg_round[cp->cp_rounds - 1];
1976 cp->cp_rounds -= objs;
1977 cp->cp_alloc += objs;
1978
1979 if (skm->skm_mode & SKM_MODE_BATCH) {
1980 struct skmem_obj *tail =
1981 cp->cp_loaded->mg_round[cp->cp_rounds];
1982 list = &tail->mo_next;
1983 *list = NULL;
1984 }
1985
1986 /* if we got them all, return to caller */
1987 if ((need -= objs) == 0) {
1988 SKM_CPU_UNLOCK(cp);
1989 goto done;
1990 }
1991 }
1992
1993 /*
1994 * The CPU's loaded magazine is empty. If the previously
1995 * loaded magazine was full, exchange and try again.
1996 */
1997 if (cp->cp_prounds > 0) {
1998 skmem_cpu_reload(cp, cp->cp_ploaded, cp->cp_prounds);
1999 continue;
2000 }
2001
2002 /*
2003 * If the magazine layer is disabled, allocate from slab.
2004 * This can happen either because SKM_MODE_NOMAGAZINES is
2005 * set, or because we are resizing the magazine now.
2006 */
2007 if (cp->cp_magsize == 0) {
2008 break;
2009 }
2010
2011 /*
2012 * Both of the CPU's magazines are empty; try to get
2013 * full magazine(s) from the depot layer. Upon success,
2014 * reload and try again. To prevent potential thrashing,
2015 * replace both empty magazines only if the requested
2016 * count exceeds a magazine's worth of objects.
2017 */
2018 (void) skmem_depot_batch_alloc(skm, &skm->skm_full,
2019 &skm->skm_depot_full, &mg, (need <= cp->cp_magsize) ? 1 : 2);
2020 if (mg != NULL) {
2021 SLIST_HEAD(, skmem_mag) mg_list =
2022 SLIST_HEAD_INITIALIZER(mg_list);
2023
2024 if (cp->cp_ploaded != NULL) {
2025 SLIST_INSERT_HEAD(&mg_list, cp->cp_ploaded,
2026 mg_link);
2027 }
2028 if (SLIST_NEXT(mg, mg_link) == NULL) {
2029 /*
2030 * Depot allocation returns only 1 magazine;
2031 * retain current empty magazine.
2032 */
2033 skmem_cpu_reload(cp, mg, cp->cp_magsize);
2034 } else {
2035 /*
2036 * We got 2 full magazines from depot;
2037 * release the current empty magazine
2038 * back to the depot layer.
2039 */
2040 if (cp->cp_loaded != NULL) {
2041 SLIST_INSERT_HEAD(&mg_list,
2042 cp->cp_loaded, mg_link);
2043 }
2044 skmem_cpu_batch_reload(cp, mg, cp->cp_magsize);
2045 }
2046 skmem_depot_batch_free(skm, &skm->skm_empty,
2047 &skm->skm_depot_empty, SLIST_FIRST(&mg_list));
2048 continue;
2049 }
2050
2051 /*
2052 * The depot layer doesn't have any full magazines;
2053 * allocate directly from the slab layer.
2054 */
2055 break;
2056 }
2057 SKM_CPU_UNLOCK(cp);
2058
2059 if (__probable(num > 1 && (skm->skm_mode & SKM_MODE_BATCH) != 0)) {
2060 struct skmem_obj *rtop, *rlist, *rlistp = NULL;
2061 uint32_t rlistc, c = 0;
2062
2063 /*
2064 * Get a list of raw objects from the slab layer.
2065 */
2066 rlistc = skmem_slab_batch_alloc(skm, &rlist, need, skmflag);
2067 ASSERT(rlistc == 0 || rlist != NULL);
2068 rtop = rlist;
2069
2070 /*
2071 * Construct each object in the raw list. Upon failure,
2072 * free any remaining objects in the list back to the slab
2073 * layer, and keep the ones that were successfully constructed.
2074 * Here, "oi" and "oim" in each skmem_obj refer to the objects
2075 * coming from the master and slave regions (on mirrored
2076 * regions), respectively. They are stored inside the object
2077 * temporarily so that we can pass them to the constructor.
2078 */
2079 while (skm->skm_ctor != NULL && rlist != NULL) {
2080 struct skmem_obj_info *oi = &rlist->mo_info;
2081 struct skmem_obj_info *oim = &rlist->mo_minfo;
2082 struct skmem_obj *rlistn = rlist->mo_next;
2083
2084 /*
2085 * Note that the constructor guarantees at least
2086 * the size of a pointer at the top of the object
2087 * and no more than that. That means we must not
2088 * refer to "oi" and "oim" any longer after the
2089 * object goes through the constructor.
2090 */
2091 if (skm->skm_ctor(oi, ((SKMEM_OBJ_ADDR(oim) != NULL) ?
2092 oim : NULL), skm->skm_private, skmflag) != 0) {
2093 VERIFY(rlist->mo_next == rlistn);
2094 atomic_add_64(&skm->skm_sl_alloc_fail,
2095 rlistc - c);
2096 if (rlistp != NULL) {
2097 rlistp->mo_next = NULL;
2098 }
2099 if (rlist == rtop) {
2100 rtop = NULL;
2101 ASSERT(c == 0);
2102 }
2103 skmem_slab_batch_free(skm, rlist);
2104 rlist = NULL;
2105 rlistc = c;
2106 break;
2107 }
2108 VERIFY(rlist->mo_next == rlistn);
2109
2110 ++c; /* # of constructed objs */
2111 rlistp = rlist;
2112 if ((rlist = rlist->mo_next) == NULL) {
2113 ASSERT(rlistc == c);
2114 break;
2115 }
2116 }
2117
2118 /*
2119 * At this point "top" points to the head of the chain we're
2120 * going to return to the caller; "list" points to the tail of
2121 * that chain. The second chain begins at "rtop", and we append
2122 * it after "list" to form a single chain. "rlistc" is the
2123 * number of objects in "rtop", originating from the slab layer,
2124 * that have been successfully constructed (if applicable).
2125 */
2126 ASSERT(c == 0 || rtop != NULL);
2127 need -= rlistc;
2128 *list = rtop;
2129 } else {
2130 struct skmem_obj_info oi, oim;
2131 void *buf;
2132
2133 ASSERT(*top == NULL && num == 1 && need == 1);
2134
2135 /*
2136 * Get a single raw object from the slab layer.
2137 */
2138 if (skmem_slab_alloc(skm, &oi, &oim, skmflag) != 0) {
2139 goto done;
2140 }
2141
2142 buf = SKMEM_OBJ_ADDR(&oi);
2143 ASSERT(buf != NULL);
2144
2145 /*
2146 * Construct the raw object. Here, "oi" and "oim" refer to
2147 * the objects coming from the master and slave regions (on
2148 * mirrored regions), respectively.
2149 */
2150 if (skm->skm_ctor != NULL &&
2151 skm->skm_ctor(&oi, ((SKMEM_OBJ_ADDR(&oim) != NULL) ?
2152 &oim : NULL), skm->skm_private, skmflag) != 0) {
2153 atomic_add_64(&skm->skm_sl_alloc_fail, 1);
2154 skmem_slab_free(skm, buf);
2155 goto done;
2156 }
2157
2158 need = 0;
2159 *list = buf;
2160 ASSERT(!(skm->skm_mode & SKM_MODE_BATCH) ||
2161 (*list)->mo_next == NULL);
2162 }
2163
2164 done:
2165 /* if auditing is enabled, record this transaction */
2166 if (__improbable(*top != NULL &&
2167 (skm->skm_mode & SKM_MODE_AUDIT) != 0)) {
2168 skmem_audit_buf(skm, *top);
2169 }
2170
2171 return num - need;
2172 }
2173
2174 /*
2175 * Free a constructed object to the cache.
2176 */
2177 void
2178 skmem_cache_free(struct skmem_cache *skm, void *buf)
2179 {
2180 if (skm->skm_mode & SKM_MODE_BATCH) {
2181 ((struct skmem_obj *)buf)->mo_next = NULL;
2182 }
2183 skmem_cache_batch_free(skm, (struct skmem_obj *)buf);
2184 }
2185
2186 void
2187 skmem_cache_batch_free(struct skmem_cache *skm, struct skmem_obj *list)
2188 {
2189 struct skmem_cpu_cache *cp = SKMEM_CPU_CACHE(skm);
2190 struct skmem_magtype *mtp;
2191 struct skmem_mag *mg;
2192 struct skmem_obj *listn;
2193
2194 /* if auditing is enabled, record this transaction */
2195 if (__improbable((skm->skm_mode & SKM_MODE_AUDIT) != 0)) {
2196 skmem_audit_buf(skm, list);
2197 }
2198
2199 SKM_CPU_LOCK(cp);
2200 for (;;) {
2201 /*
2202 * If there's available space in the current CPU's
2203 * loaded magazine, place it there and we're done.
2204 */
2205 if ((unsigned int)cp->cp_rounds <
2206 (unsigned int)cp->cp_magsize) {
2207 /*
2208 * In the SKM_MODE_BATCH case, reverse the list
2209 * while we place each object into the magazine;
2210 * this effectively causes the most recently
2211 * freed object to be reused during allocation.
2212 */
2213 if (skm->skm_mode & SKM_MODE_BATCH) {
2214 listn = list->mo_next;
2215 list->mo_next = (cp->cp_rounds == 0) ? NULL :
2216 cp->cp_loaded->mg_round[cp->cp_rounds - 1];
2217 } else {
2218 listn = NULL;
2219 }
2220
2221 cp->cp_loaded->mg_round[cp->cp_rounds++] = list;
2222 cp->cp_free++;
2223
2224 if ((list = listn) != NULL) {
2225 continue;
2226 }
2227
2228 SKM_CPU_UNLOCK(cp);
2229 return;
2230 }
2231
2232 /*
2233 * The loaded magazine is full. If the previously
2234 * loaded magazine was empty, exchange and try again.
2235 */
2236 if (cp->cp_prounds == 0) {
2237 skmem_cpu_reload(cp, cp->cp_ploaded, cp->cp_prounds);
2238 continue;
2239 }
2240
2241 /*
2242 * If the magazine layer is disabled, free to slab.
2243 * This can happen either because SKM_MODE_NOMAGAZINES
2244 * is set, or because we are resizing the magazine now.
2245 */
2246 if (cp->cp_magsize == 0) {
2247 break;
2248 }
2249
2250 /*
2251 * Both magazines for the CPU are full; try to get
2252 * empty magazine(s) from the depot. If we get one,
2253 * exchange a full magazine with it and place the
2254 * object in there.
2255 *
2256 * TODO: Because the caller currently doesn't indicate
2257 * the number of objects in the list, we choose the more
2258 * conservative approach of allocating only 1 empty
2259 * magazine (to prevent potential thrashing). Once we
2260 * have the object count, we can replace 1 with logic
2261 * similar to that used in skmem_cache_batch_alloc().
2262 */
2263 (void) skmem_depot_batch_alloc(skm, &skm->skm_empty,
2264 &skm->skm_depot_empty, &mg, 1);
2265 if (mg != NULL) {
2266 SLIST_HEAD(, skmem_mag) mg_list =
2267 SLIST_HEAD_INITIALIZER(mg_list);
2268
2269 if (cp->cp_ploaded != NULL) {
2270 SLIST_INSERT_HEAD(&mg_list, cp->cp_ploaded,
2271 mg_link);
2272 }
2273 if (SLIST_NEXT(mg, mg_link) == NULL) {
2274 /*
2275 * Depot allocation returns only 1 magazine;
2276 * retain current full magazine.
2277 */
2278 skmem_cpu_reload(cp, mg, 0);
2279 } else {
2280 /*
2281 * We got 2 empty magazines from depot;
2282 * release the current full magazine back
2283 * to the depot layer.
2284 */
2285 if (cp->cp_loaded != NULL) {
2286 SLIST_INSERT_HEAD(&mg_list,
2287 cp->cp_loaded, mg_link);
2288 }
2289 skmem_cpu_batch_reload(cp, mg, 0);
2290 }
2291 skmem_depot_batch_free(skm, &skm->skm_full,
2292 &skm->skm_depot_full, SLIST_FIRST(&mg_list));
2293 continue;
2294 }
2295
2296 /*
2297 * We can't get any empty magazine from the depot, and
2298 * so we need to allocate one. If the allocation fails,
2299 * just fall through, deconstruct and free the object
2300 * to the slab layer.
2301 */
2302 mtp = skm->skm_magtype;
2303 SKM_CPU_UNLOCK(cp);
2304 mg = skmem_cache_alloc(mtp->mt_cache, SKMEM_NOSLEEP);
2305 SKM_CPU_LOCK(cp);
2306
2307 if (mg != NULL) {
2308 /*
2309 * We allocated an empty magazine, but since we
2310 * dropped the CPU lock above the magazine size
2311 * may have changed. If that's the case free
2312 * the magazine and try again.
2313 */
2314 if (cp->cp_magsize != mtp->mt_magsize) {
2315 SKM_CPU_UNLOCK(cp);
2316 skmem_cache_free(mtp->mt_cache, mg);
2317 SKM_CPU_LOCK(cp);
2318 continue;
2319 }
2320
2321 /*
2322 * We have a magazine with the right size;
2323 * add it to the depot and try again.
2324 */
2325 ASSERT(SLIST_NEXT(mg, mg_link) == NULL);
2326 skmem_depot_batch_free(skm, &skm->skm_empty,
2327 &skm->skm_depot_empty, mg);
2328 continue;
2329 }
2330
2331 /*
2332 * We can't get an empty magazine, so free to slab.
2333 */
2334 break;
2335 }
2336 SKM_CPU_UNLOCK(cp);
2337
2338 /*
2339 * We weren't able to free the constructed object(s) to the
2340 * magazine layer, so deconstruct them and free to the slab.
2341 */
2342 if (__probable((skm->skm_mode & SKM_MODE_BATCH) &&
2343 list->mo_next != NULL)) {
2344 /* whatever is left from original list */
2345 struct skmem_obj *top = list;
2346
2347 while (list != NULL && skm->skm_dtor != NULL) {
2348 listn = list->mo_next;
2349 list->mo_next = NULL;
2350
2351 /* deconstruct the object */
2352 if (skm->skm_dtor != NULL) {
2353 skm->skm_dtor((void *)list, skm->skm_private);
2354 }
2355
2356 list->mo_next = listn;
2357 list = listn;
2358 }
2359
2360 skmem_slab_batch_free(skm, top);
2361 } else {
2362 /* deconstruct the object */
2363 if (skm->skm_dtor != NULL) {
2364 skm->skm_dtor((void *)list, skm->skm_private);
2365 }
2366
2367 skmem_slab_free(skm, (void *)list);
2368 }
2369 }
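/*
 * Illustrative sketch only (hypothetical helper, compiled out):
 * batch allocation and free for a cache created with SKM_MODE_BATCH.
 * The objects come back chained through mo_next, and the same chain
 * may be handed back to skmem_cache_batch_free() in a single call.
 */
#if 0
static void
skmem_cache_batch_example(struct skmem_cache *skm)
{
	struct skmem_obj *list;
	uint32_t got;

	/* ask for up to 8 constructed objects; fewer may be returned */
	got = skmem_cache_batch_alloc(skm, &list, 8, SKMEM_NOSLEEP);
	if (got == 0) {
		return;
	}

	/* ... walk the chain via mo_next and use each object ... */

	/* free the entire chain back to the cache */
	skmem_cache_batch_free(skm, list);
}
#endif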
2370
2371 /*
2372 * Return the maximum number of objects cached at the magazine layer
2373 * based on the chunk size. This takes into account the starting
2374 * magazine type as well as the final magazine type used in resizing.
2375 */
2376 uint32_t
2377 skmem_cache_magazine_max(uint32_t chunksize)
2378 {
2379 struct skmem_magtype *mtp;
2380 uint32_t magsize_max;
2381
2382 VERIFY(ncpu != 0);
2383 VERIFY(chunksize > 0);
2384
2385 /* find a suitable magazine type for this chunk size */
2386 for (mtp = skmem_magtype; chunksize <= mtp->mt_minbuf; mtp++) {
2387 continue;
2388 }
2389
2390 /* and find the last magazine type */
2391 for (;;) {
2392 magsize_max = mtp->mt_magsize;
2393 if (mtp == skmem_cache_magsize_last ||
2394 chunksize >= mtp->mt_maxbuf) {
2395 break;
2396 }
2397 ++mtp;
2398 VERIFY(mtp <= skmem_cache_magsize_last);
2399 }
2400
2401 return ncpu * magsize_max * 2; /* two magazines per CPU */
2402 }
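/*
 * For example, on a hypothetical 8-CPU system whose resizing sequence
 * for the given chunk size tops out at a 128-round magazine, the
 * magazine layer may cache up to 8 * 128 * 2 == 2048 objects (one
 * loaded and one previously-loaded magazine per CPU).
 */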
2403
2404 /*
2405 * Return true if SKMEM_DEBUG_NOMAGAZINES is not set in skmem_debug.
2406 */
2407 boolean_t
2408 skmem_allow_magazines(void)
2409 {
2410 return !(skmem_debug & SKMEM_DEBUG_NOMAGAZINES);
2411 }
2412
2413 /*
2414 * Purge all magazines from a cache and disable its per-CPU magazines layer.
2415 */
2416 static void
2417 skmem_cache_magazine_purge(struct skmem_cache *skm)
2418 {
2419 struct skmem_cpu_cache *cp;
2420 struct skmem_mag *mg, *pmg;
2421 int rounds, prounds;
2422 uint32_t cpuid, mg_cnt = 0, pmg_cnt = 0;
2423
2424 SKM_SLAB_LOCK_ASSERT_NOTHELD(skm);
2425
2426 SK_DF(SK_VERB_MEM_CACHE, "skm 0x%llx", SK_KVA(skm));
2427
2428 for (cpuid = 0; cpuid < ncpu; cpuid++) {
2429 cp = &skm->skm_cpu_cache[cpuid];
2430
2431 SKM_CPU_LOCK_SPIN(cp);
2432 mg = cp->cp_loaded;
2433 pmg = cp->cp_ploaded;
2434 rounds = cp->cp_rounds;
2435 prounds = cp->cp_prounds;
2436 cp->cp_loaded = NULL;
2437 cp->cp_ploaded = NULL;
2438 cp->cp_rounds = -1;
2439 cp->cp_prounds = -1;
2440 cp->cp_magsize = 0;
2441 SKM_CPU_UNLOCK(cp);
2442
2443 if (mg != NULL) {
2444 skmem_magazine_destroy(skm, mg, rounds);
2445 ++mg_cnt;
2446 }
2447 if (pmg != NULL) {
2448 skmem_magazine_destroy(skm, pmg, prounds);
2449 ++pmg_cnt;
2450 }
2451 }
2452
2453 if (mg_cnt != 0 || pmg_cnt != 0) {
2454 atomic_add_32(&skm->skm_cpu_mag_purge, 1);
2455 }
2456
2457 skmem_depot_ws_zero(skm);
2458 skmem_depot_ws_reap(skm);
2459 }
2460
2461 /*
2462 * Enable magazines on a cache. Must only be called on a cache with
2463 * its per-CPU magazines layer disabled (e.g. due to purge).
2464 */
2465 static void
2466 skmem_cache_magazine_enable(struct skmem_cache *skm, uint32_t arg)
2467 {
2468 #pragma unused(arg)
2469 struct skmem_cpu_cache *cp;
2470 uint32_t cpuid;
2471
2472 if (skm->skm_mode & SKM_MODE_NOMAGAZINES) {
2473 return;
2474 }
2475
2476 for (cpuid = 0; cpuid < ncpu; cpuid++) {
2477 cp = &skm->skm_cpu_cache[cpuid];
2478 SKM_CPU_LOCK_SPIN(cp);
2479 /* the magazines layer must be disabled at this point */
2480 ASSERT(cp->cp_loaded == NULL);
2481 ASSERT(cp->cp_ploaded == NULL);
2482 ASSERT(cp->cp_rounds == -1);
2483 ASSERT(cp->cp_prounds == -1);
2484 ASSERT(cp->cp_magsize == 0);
2485 cp->cp_magsize = skm->skm_magtype->mt_magsize;
2486 SKM_CPU_UNLOCK(cp);
2487 }
2488
2489 SK_DF(SK_VERB_MEM_CACHE, "skm 0x%llx chunksize %u magsize %d",
2490 SK_KVA(skm), (uint32_t)skm->skm_chunksize,
2491 SKMEM_CPU_CACHE(skm)->cp_magsize);
2492 }
2493
2494 /*
2495 * Enter the cache resize perimeter. Upon success, claim exclusivity
2496 * on the perimeter and return 0, else EBUSY. Caller may indicate
2497 * whether or not they're willing to wait.
2498 */
2499 static int
2500 skmem_cache_resize_enter(struct skmem_cache *skm, boolean_t can_sleep)
2501 {
2502 SKM_RESIZE_LOCK(skm);
2503 if (skm->skm_rs_owner == current_thread()) {
2504 ASSERT(skm->skm_rs_busy != 0);
2505 skm->skm_rs_busy++;
2506 goto done;
2507 }
2508 if (!can_sleep) {
2509 if (skm->skm_rs_busy != 0) {
2510 SKM_RESIZE_UNLOCK(skm);
2511 return EBUSY;
2512 }
2513 } else {
2514 while (skm->skm_rs_busy != 0) {
2515 skm->skm_rs_want++;
2516 (void) assert_wait(&skm->skm_rs_busy, THREAD_UNINT);
2517 SKM_RESIZE_UNLOCK(skm);
2518 (void) thread_block(THREAD_CONTINUE_NULL);
2519 SK_DF(SK_VERB_MEM_CACHE, "waited for skm \"%s\" "
2520 "(0x%llx) busy=%u", skm->skm_name,
2521 SK_KVA(skm), skm->skm_rs_busy);
2522 SKM_RESIZE_LOCK(skm);
2523 }
2524 }
2525 SKM_RESIZE_LOCK_ASSERT_HELD(skm);
2526 ASSERT(skm->skm_rs_busy == 0);
2527 skm->skm_rs_busy++;
2528 skm->skm_rs_owner = current_thread();
2529 done:
2530 SKM_RESIZE_UNLOCK(skm);
2531 return 0;
2532 }
2533
2534 /*
2535 * Exit the cache resize perimeter and unblock any waiters.
2536 */
2537 static void
2538 skmem_cache_resize_exit(struct skmem_cache *skm)
2539 {
2540 uint32_t want;
2541
2542 SKM_RESIZE_LOCK(skm);
2543 ASSERT(skm->skm_rs_busy != 0);
2544 ASSERT(skm->skm_rs_owner == current_thread());
2545 if (--skm->skm_rs_busy == 0) {
2546 skm->skm_rs_owner = NULL;
2547 /*
2548 * We're done; notify anyone that has lost the race.
2549 */
2550 if ((want = skm->skm_rs_want) != 0) {
2551 skm->skm_rs_want = 0;
2552 wakeup((void *)&skm->skm_rs_busy);
2553 SKM_RESIZE_UNLOCK(skm);
2554 } else {
2555 SKM_RESIZE_UNLOCK(skm);
2556 }
2557 } else {
2558 SKM_RESIZE_UNLOCK(skm);
2559 }
2560 }
2561
2562 /*
2563 * Recompute a cache's magazine size. This is an expensive operation
2564 * and should not be done frequently; larger magazines provide for a
2565 * higher transfer rate with the depot while smaller magazines reduce
2566 * the memory consumption.
2567 */
2568 static void
2569 skmem_cache_magazine_resize(struct skmem_cache *skm)
2570 {
2571 struct skmem_magtype *mtp = skm->skm_magtype;
2572
2573 /* insist that we are executing in the update thread call context */
2574 ASSERT(sk_is_cache_update_protected());
2575 ASSERT(!(skm->skm_mode & SKM_MODE_NOMAGAZINES));
2576 /* depot contention only applies to dynamic mode */
2577 ASSERT(skm->skm_mode & SKM_MODE_DYNAMIC);
2578
2579 /*
2580 * Although we're executing in the context of the update thread
2581 * call, we need to protect the per-CPU states during resizing
2582 * against other synchronous cache purge/reenable requests that
2583 * could take place in parallel.
2584 */
2585 if (skm->skm_chunksize < mtp->mt_maxbuf) {
2586 (void) skmem_cache_resize_enter(skm, TRUE);
2587 skmem_cache_magazine_purge(skm);
2588
2589 /*
2590 * Upgrade to the next magazine type with larger size.
2591 */
2592 SKM_DEPOT_LOCK_SPIN(skm);
2593 skm->skm_cpu_mag_resize++;
2594 skm->skm_magtype = ++mtp;
2595 skm->skm_cpu_mag_size = skm->skm_magtype->mt_magsize;
2596 skm->skm_depot_contention_prev =
2597 skm->skm_depot_contention + INT_MAX;
2598 SKM_DEPOT_UNLOCK(skm);
2599
2600 skmem_cache_magazine_enable(skm, 0);
2601 skmem_cache_resize_exit(skm);
2602 }
2603 }
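/*
 * Note that biasing skm_depot_contention_prev by INT_MAX above makes
 * the next interval's contention delta (computed as a signed int in
 * skmem_cache_update()) come out negative, which presumably keeps
 * the cache from being resized again immediately after an upgrade.
 */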
2604
2605 /*
2606 * Rescale the cache's allocated-address hash table.
2607 */
2608 static void
2609 skmem_cache_hash_rescale(struct skmem_cache *skm)
2610 {
2611 struct skmem_bufctl_bkt *old_table, *new_table;
2612 size_t old_size, new_size;
2613 uint32_t i, moved = 0;
2614
2615 /* insist that we are executing in the update thread call context */
2616 ASSERT(sk_is_cache_update_protected());
2617
2618 /*
2619 * To get a small average lookup time (lookup depth near 1.0), the hash
2620 * table size should be roughly the same as (though not necessarily
2621 * equal to) the cache size.
2622 */
2623 new_size = MAX(skm->skm_hash_initial,
2624 (1 << (flsll(3 * skm->skm_sl_bufinuse + 4) - 2)));
2625 new_size = MIN(skm->skm_hash_limit, new_size);
2626 old_size = (skm->skm_hash_mask + 1);
2627
2628 if ((old_size >> 1) <= new_size && new_size <= (old_size << 1)) {
2629 return;
2630 }
2631
2632 new_table = sk_alloc_type_array(struct skmem_bufctl_bkt, new_size,
2633 Z_NOWAIT, skmem_tag_bufctl_hash);
2634 if (__improbable(new_table == NULL)) {
2635 return;
2636 }
2637
2638 for (i = 0; i < new_size; i++) {
2639 SLIST_INIT(&new_table[i].bcb_head);
2640 }
2641
2642 SKM_SLAB_LOCK(skm);
2643
2644 old_size = (skm->skm_hash_mask + 1);
2645 old_table = skm->skm_hash_table;
2646
2647 skm->skm_hash_mask = (new_size - 1);
2648 skm->skm_hash_table = new_table;
2649 skm->skm_sl_rescale++;
2650
2651 for (i = 0; i < old_size; i++) {
2652 struct skmem_bufctl_bkt *bcb = &old_table[i];
2653 struct skmem_bufctl_bkt *new_bcb;
2654 struct skmem_bufctl *bc;
2655
2656 while ((bc = SLIST_FIRST(&bcb->bcb_head)) != NULL) {
2657 SLIST_REMOVE_HEAD(&bcb->bcb_head, bc_link);
2658 new_bcb = SKMEM_CACHE_HASH(skm, bc->bc_addr);
2659 /*
2660 * Ideally we would insert at the tail here, but the
2661 * singly-linked list doesn't give us that. The fact that we
2662 * are essentially reversing the order is not a big deal
2663 * here vis-a-vis the new table size.
2664 */
2665 SLIST_INSERT_HEAD(&new_bcb->bcb_head, bc, bc_link);
2666 ++moved;
2667 }
2668 ASSERT(SLIST_EMPTY(&bcb->bcb_head));
2669 }
2670
2671 SK_DF(SK_VERB_MEM_CACHE,
2672 "skm 0x%llx old_size %u new_size %u [%u moved]", SK_KVA(skm),
2673 (uint32_t)old_size, (uint32_t)new_size, moved);
2674
2675 SKM_SLAB_UNLOCK(skm);
2676
2677 sk_free_type_array(struct skmem_bufctl_bkt, old_size, old_table);
2678 }
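/*
 * As a worked example of the sizing heuristic above: with roughly
 * 1000 objects in use, 3 * 1000 + 4 == 3004 and flsll(3004) == 12,
 * so the candidate size is 1 << (12 - 2) == 1024 buckets, i.e. about
 * one bucket per allocated object; the rescale is then skipped
 * unless the result differs from the current size by more than a
 * factor of two.
 */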
2679
2680 /*
2681 * Apply a function to operate on all caches.
2682 */
2683 static void
2684 skmem_cache_applyall(void (*func)(struct skmem_cache *, uint32_t), uint32_t arg)
2685 {
2686 struct skmem_cache *skm;
2687
2688 net_update_uptime();
2689
2690 SKMEM_CACHE_LOCK();
2691 TAILQ_FOREACH(skm, &skmem_cache_head, skm_link) {
2692 func(skm, arg);
2693 }
2694 SKMEM_CACHE_UNLOCK();
2695 }
2696
2697 /*
2698 * Reclaim unused memory from a cache.
2699 */
2700 static void
2701 skmem_cache_reclaim(struct skmem_cache *skm, uint32_t lowmem)
2702 {
2703 /*
2704 * Inform the owner to free memory if possible; the reclaim
2705 * policy is left to the owner. This is just an advisory.
2706 */
2707 if (skm->skm_reclaim != NULL) {
2708 skm->skm_reclaim(skm->skm_private);
2709 }
2710
2711 if (lowmem) {
2712 /*
2713 * If another thread is in the process of purging or
2714 * resizing, bail out and let the currently-ongoing
2715 * purging take its natural course.
2716 */
2717 if (skmem_cache_resize_enter(skm, FALSE) == 0) {
2718 skmem_cache_magazine_purge(skm);
2719 skmem_cache_magazine_enable(skm, 0);
2720 skmem_cache_resize_exit(skm);
2721 }
2722 } else {
2723 skmem_depot_ws_reap(skm);
2724 }
2725 }
2726
2727 /*
2728 * Thread call callback for reap.
2729 */
2730 static void
2731 skmem_cache_reap_func(thread_call_param_t dummy, thread_call_param_t arg)
2732 {
2733 #pragma unused(dummy)
2734 void (*func)(void) = arg;
2735
2736 ASSERT(func == skmem_cache_reap_start || func == skmem_cache_reap_done);
2737 func();
2738 }
2739
2740 /*
2741 * Start reaping all caches; this is serialized via thread call.
2742 */
2743 static void
2744 skmem_cache_reap_start(void)
2745 {
2746 SK_DF(SK_VERB_MEM_CACHE, "now running");
2747 skmem_cache_applyall(skmem_cache_reclaim, skmem_lowmem_check());
2748 skmem_dispatch(skmem_cache_reap_tc, skmem_cache_reap_done,
2749 (skmem_cache_update_interval * NSEC_PER_SEC));
2750 }
2751
2752 /*
2753 * Stop reaping; this allows another reap request to occur.
2754 */
2755 static void
2756 skmem_cache_reap_done(void)
2757 {
2758 volatile uint32_t *flag = &skmem_cache_reaping;
2759
2760 *flag = 0;
2761 membar_sync();
2762 }
2763
2764 /*
2765 * Immediately reap all unused memory of a cache. If purging,
2766 * also purge the cached objects at the CPU layer.
2767 */
2768 void
2769 skmem_cache_reap_now(struct skmem_cache *skm, boolean_t purge)
2770 {
2771 if (purge) {
2772 /*
2773 * If another thread is in the process of purging or
2774 * resizing, bail out and let the currently-ongoing
2775 * purging take its natural course.
2776 */
2777 if (skmem_cache_resize_enter(skm, FALSE) == 0) {
2778 skmem_cache_magazine_purge(skm);
2779 skmem_cache_magazine_enable(skm, 0);
2780 skmem_cache_resize_exit(skm);
2781 }
2782 } else {
2783 skmem_depot_ws_zero(skm);
2784 skmem_depot_ws_reap(skm);
2785 }
2786 }
2787
2788 /*
2789 * Request a global reap operation to be dispatched.
2790 */
2791 void
2792 skmem_cache_reap(void)
2793 {
2794 /* only one reaping episode is allowed at a time */
2795 if (skmem_lock_owner == current_thread() ||
2796 !atomic_test_set_32(&skmem_cache_reaping, 0, 1)) {
2797 return;
2798 }
2799
2800 skmem_dispatch(skmem_cache_reap_tc, skmem_cache_reap_start, 0);
2801 }
2802
2803 /*
2804 * Reap internal caches.
2805 */
2806 void
2807 skmem_reap_caches(boolean_t purge)
2808 {
2809 skmem_cache_reap_now(skmem_slab_cache, purge);
2810 skmem_cache_reap_now(skmem_bufctl_cache, purge);
2811
2812 /* packet buffer pool objects */
2813 pp_reap_caches(purge);
2814
2815 /* also handle the region cache(s) */
2816 skmem_region_reap_caches(purge);
2817 }
2818
2819 /*
2820 * Thread call callback for update.
2821 */
2822 static void
2823 skmem_cache_update_func(thread_call_param_t dummy, thread_call_param_t arg)
2824 {
2825 #pragma unused(dummy, arg)
2826 sk_protect_t protect;
2827
2828 protect = sk_cache_update_protect();
2829 skmem_cache_applyall(skmem_cache_update, 0);
2830 sk_cache_update_unprotect(protect);
2831
2832 skmem_dispatch(skmem_cache_update_tc, NULL,
2833 (skmem_cache_update_interval * NSEC_PER_SEC));
2834 }
2835
2836 /*
2837 * Given a buffer control, record the current transaction.
2838 */
2839 __attribute__((noinline, cold, not_tail_called))
2840 static inline void
2841 skmem_audit_bufctl(struct skmem_bufctl *bc)
2842 {
2843 struct skmem_bufctl_audit *bca = (struct skmem_bufctl_audit *)bc;
2844 struct timeval tv;
2845
2846 microuptime(&tv);
2847 bca->bc_thread = current_thread();
2848 bca->bc_timestamp = (uint32_t)((tv.tv_sec * 1000) + (tv.tv_usec / 1000));
2849 bca->bc_depth = OSBacktrace(bca->bc_stack, SKMEM_STACK_DEPTH);
2850 }
2851
2852 /*
2853 * Given an object, find its buffer control and record the transaction.
2854 */
2855 __attribute__((noinline, cold, not_tail_called))
2856 static inline void
2857 skmem_audit_buf(struct skmem_cache *skm, struct skmem_obj *list)
2858 {
2859 struct skmem_bufctl_bkt *bcb;
2860 struct skmem_bufctl *bc;
2861
2862 ASSERT(!(skm->skm_mode & SKM_MODE_PSEUDO));
2863
2864 SKM_SLAB_LOCK(skm);
2865 while (list != NULL) {
2866 void *buf = list;
2867
2868 bcb = SKMEM_CACHE_HASH(skm, buf);
2869 SLIST_FOREACH(bc, &bcb->bcb_head, bc_link) {
2870 if (bc->bc_addr == buf) {
2871 break;
2872 }
2873 }
2874
2875 if (__improbable(bc == NULL)) {
2876 panic("%s: %s failed to get bufctl for %p",
2877 __func__, skm->skm_name, buf);
2878 /* NOTREACHED */
2879 __builtin_unreachable();
2880 }
2881
2882 skmem_audit_bufctl(bc);
2883
2884 if (!(skm->skm_mode & SKM_MODE_BATCH)) {
2885 break;
2886 }
2887
2888 list = list->mo_next;
2889 }
2890 SKM_SLAB_UNLOCK(skm);
2891 }
2892
2893 static size_t
2894 skmem_cache_mib_get_stats(struct skmem_cache *skm, void *out, size_t len)
2895 {
2896 size_t actual_space = sizeof(struct sk_stats_cache);
2897 struct sk_stats_cache *sca = out;
2898 int contention;
2899
2900 if (out == NULL || len < actual_space) {
2901 goto done;
2902 }
2903
2904 bzero(sca, sizeof(*sca));
2905 (void) snprintf(sca->sca_name, sizeof(sca->sca_name), "%s",
2906 skm->skm_name);
2907 uuid_copy(sca->sca_uuid, skm->skm_uuid);
2908 uuid_copy(sca->sca_ruuid, skm->skm_region->skr_uuid);
2909 sca->sca_mode = skm->skm_mode;
2910 sca->sca_bufsize = (uint64_t)skm->skm_bufsize;
2911 sca->sca_objsize = (uint64_t)skm->skm_objsize;
2912 sca->sca_chunksize = (uint64_t)skm->skm_chunksize;
2913 sca->sca_slabsize = (uint64_t)skm->skm_slabsize;
2914 sca->sca_bufalign = (uint64_t)skm->skm_bufalign;
2915 sca->sca_objalign = (uint64_t)skm->skm_objalign;
2916
2917 sca->sca_cpu_mag_size = skm->skm_cpu_mag_size;
2918 sca->sca_cpu_mag_resize = skm->skm_cpu_mag_resize;
2919 sca->sca_cpu_mag_purge = skm->skm_cpu_mag_purge;
2920 sca->sca_cpu_mag_reap = skm->skm_cpu_mag_reap;
2921 sca->sca_depot_full = skm->skm_depot_full;
2922 sca->sca_depot_empty = skm->skm_depot_empty;
2923 sca->sca_depot_ws_zero = skm->skm_depot_ws_zero;
2924 /* in case of a race this might be a negative value, turn it into 0 */
2925 if ((contention = (int)(skm->skm_depot_contention -
2926 skm->skm_depot_contention_prev)) < 0) {
2927 contention = 0;
2928 }
2929 sca->sca_depot_contention_factor = contention;
2930
2931 sca->sca_sl_create = skm->skm_sl_create;
2932 sca->sca_sl_destroy = skm->skm_sl_destroy;
2933 sca->sca_sl_alloc = skm->skm_sl_alloc;
2934 sca->sca_sl_free = skm->skm_sl_free;
2935 sca->sca_sl_alloc_fail = skm->skm_sl_alloc_fail;
2936 sca->sca_sl_partial = skm->skm_sl_partial;
2937 sca->sca_sl_empty = skm->skm_sl_empty;
2938 sca->sca_sl_bufinuse = skm->skm_sl_bufinuse;
2939 sca->sca_sl_rescale = skm->skm_sl_rescale;
2940 sca->sca_sl_hash_size = (skm->skm_hash_mask + 1);
2941
2942 done:
2943 return actual_space;
2944 }
2945
2946 static int
2947 skmem_cache_mib_get_sysctl SYSCTL_HANDLER_ARGS
2948 {
2949 #pragma unused(arg1, arg2, oidp)
2950 struct skmem_cache *skm;
2951 size_t actual_space;
2952 size_t buffer_space;
2953 size_t allocated_space;
2954 caddr_t buffer = NULL;
2955 caddr_t scan;
2956 int error = 0;
2957
2958 if (!kauth_cred_issuser(kauth_cred_get())) {
2959 return EPERM;
2960 }
2961
2962 net_update_uptime();
2963 buffer_space = req->oldlen;
2964 if (req->oldptr != USER_ADDR_NULL && buffer_space != 0) {
2965 if (buffer_space > SK_SYSCTL_ALLOC_MAX) {
2966 buffer_space = SK_SYSCTL_ALLOC_MAX;
2967 }
2968 allocated_space = buffer_space;
2969 buffer = sk_alloc_data(allocated_space, Z_WAITOK, skmem_tag_cache_mib);
2970 if (__improbable(buffer == NULL)) {
2971 return ENOBUFS;
2972 }
2973 } else if (req->oldptr == USER_ADDR_NULL) {
2974 buffer_space = 0;
2975 }
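/*
 * When the old pointer is NULL the caller is only probing for the
 * required buffer size: no staging buffer is allocated, the loop
 * below merely accumulates actual_space, and SYSCTL_OUT() reports
 * that length back to the caller.
 */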
2976 actual_space = 0;
2977 scan = buffer;
2978
2979 SKMEM_CACHE_LOCK();
2980 TAILQ_FOREACH(skm, &skmem_cache_head, skm_link) {
2981 size_t size = skmem_cache_mib_get_stats(skm, scan, buffer_space);
2982 if (scan != NULL) {
2983 if (buffer_space < size) {
2984 /* supplied buffer too small, stop copying */
2985 error = ENOMEM;
2986 break;
2987 }
2988 scan += size;
2989 buffer_space -= size;
2990 }
2991 actual_space += size;
2992 }
2993 SKMEM_CACHE_UNLOCK();
2994
2995 if (actual_space != 0) {
2996 int out_error = SYSCTL_OUT(req, buffer, actual_space);
2997 if (out_error != 0) {
2998 error = out_error;
2999 }
3000 }
3001 if (buffer != NULL) {
3002 sk_free_data(buffer, allocated_space);
3003 }
3004
3005 return error;
3006 }
3007