1 /*
2 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #include <machine/limits.h>
31 #include <machine/machine_routines.h>
32 #include <vm/vm_pageout.h>
33
34 /*
35 * Region templates.
36 *
37 * Regions that are not eligible for user task mapping must never be
38 * marked with the SKMEM_REGION_CR_MMAPOK flag. Such regions will
39 * automatically be excluded from the mappable region array at arena
40 * creation time.
41 *
42 * Regions that allow their objects to be shared among other objects
43 * must be marked with SKMEM_REGION_CR_SHAREOK. This permits calls
44 * to skmem_bufctl_{use,unuse}() on the bufctls for the objects.
45 *
46 * Read-only regions must be marked with SKMEM_REGION_CR_UREADONLY.
47 * This will affect the protection property of the segments in those
48 * regions. This flag has no effect when the region is not mappable
49 * to a user task.
50 *
51 * The SKMEM_REGION_CR_NOMAGAZINES flag marks the region as unsupportive
52 * of the magazines layer when used by a skmem_cache. When this flag is
53 * not set, the number of objects in the region will be adjusted to
54 * include the worst-case number of objects cached at the CPU layer.
55 * By default, all regions have this flag set; this may be overridden
56 * by each client (after making a copy).
57 *
58 * Regions that don't support multi-segments can be marked with the
59 * SKMEM_REGION_CR_MONOLITHIC flag. This forces exactly one segment
60 * to cover all objects in the region. This also effectively caps
61 * the skmem_cache slab layer to have only a single slab.
62 *
63 * The correctness of the region templates is enforced at arena
64 * creation time.
65 */
static const struct skmem_region_params skmem_regions[SKMEM_REGIONS] = {
	/*
	 * Leading guard page(s): {mappable, no-read-write, no-cache}
	 */
	[SKMEM_REGION_GUARD_HEAD] = {
		.srp_name = "headguard",
		.srp_id = SKMEM_REGION_GUARD_HEAD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_GUARD | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Schema: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_SCHEMA] = {
		.srp_name = "schema",
		.srp_id = SKMEM_REGION_SCHEMA,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rings: {mappable, no-cache}
	 */
	[SKMEM_REGION_RING] = {
		.srp_name = "ring",
		.srp_id = SKMEM_REGION_RING,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Buffers: {mappable, shareable}
	 */
	/* bidirectional buffers (both IODIR_IN and IODIR_OUT set) */
	[SKMEM_REGION_BUF] = {
		.srp_name = "buf",
		.srp_id = SKMEM_REGION_BUF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_IODIR_OUT | SKMEM_REGION_CR_SHAREOK |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	/* Rx-only buffers (SKMEM_REGION_CR_IODIR_IN only) */
	[SKMEM_REGION_RXBUF] = {
		.srp_name = "rxbuf",
		.srp_id = SKMEM_REGION_RXBUF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	/* Tx-only buffers (SKMEM_REGION_CR_IODIR_OUT only) */
	[SKMEM_REGION_TXBUF] = {
		.srp_name = "txbuf",
		.srp_id = SKMEM_REGION_TXBUF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_OUT |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Userspace metadata: {mappable}
	 */
	[SKMEM_REGION_UMD] = {
		.srp_name = "umd",
		.srp_id = SKMEM_REGION_UMD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},

	/*
	 * Userspace buflet metadata: {mappable}
	 */
	[SKMEM_REGION_UBFT] = {
		.srp_name = "ubft",
		.srp_id = SKMEM_REGION_UBFT,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
		.srp_max_frags = 1,
	},

	/*
	 * Tx/alloc userspace slot descriptors: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_TXAUSD] = {
		.srp_name = "txausd",
		.srp_id = SKMEM_REGION_TXAUSD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rx/free userspace slot descriptors: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_RXFUSD] = {
		.srp_name = "rxfusd",
		.srp_id = SKMEM_REGION_RXFUSD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Shared statistics: {mappable, monolithic, no-cache}
	 */
	[SKMEM_REGION_USTATS] = {
		.srp_name = "ustats",
		.srp_id = SKMEM_REGION_USTATS,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_MONOLITHIC | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Flow advisories: {mappable, read-only, monolithic, no-cache}
	 */
	[SKMEM_REGION_FLOWADV] = {
		.srp_name = "flowadv",
		.srp_id = SKMEM_REGION_FLOWADV,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Nexus advisories: {mappable, read-only, monolithic, no-cache}
	 */
	/* note: also marked persistent, unlike the flow-advisory region */
	[SKMEM_REGION_NEXUSADV] = {
		.srp_name = "nexusadv",
		.srp_id = SKMEM_REGION_NEXUSADV,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PERSISTENT |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * sysctls: {mappable, monolithic, no-cache}
	 */
	[SKMEM_REGION_SYSCTLS] = {
		.srp_name = "sysctls",
		.srp_id = SKMEM_REGION_SYSCTLS,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_NOREDIRECT |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Trailing guard page(s): {mappable, no-read-write, no-cache}
	 */
	[SKMEM_REGION_GUARD_TAIL] = {
		.srp_name = "tailguard",
		.srp_id = SKMEM_REGION_GUARD_TAIL,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_GUARD | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Kernel metadata.
	 * No SKMEM_REGION_CR_MMAPOK here: these regions are never
	 * mapped into a user task.
	 */
	[SKMEM_REGION_KMD] = {
		.srp_name = "kmd",
		.srp_id = SKMEM_REGION_KMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},
	/* kernel metadata paired with the Rx-only buffer region */
	[SKMEM_REGION_RXKMD] = {
		.srp_name = "rxkmd",
		.srp_id = SKMEM_REGION_RXKMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},
	/* kernel metadata paired with the Tx-only buffer region */
	[SKMEM_REGION_TXKMD] = {
		.srp_name = "txkmd",
		.srp_id = SKMEM_REGION_TXKMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},

	/*
	 * kernel buflet metadata.
	 * Not user-mappable; metadata type/subtype left invalid.
	 */
	[SKMEM_REGION_KBFT] = {
		.srp_name = "kbft",
		.srp_id = SKMEM_REGION_KBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_RXKBFT] = {
		.srp_name = "rxkbft",
		.srp_id = SKMEM_REGION_RXKBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_TXKBFT] = {
		.srp_name = "txkbft",
		.srp_id = SKMEM_REGION_TXKBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,	/* object count configured by client */
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Tx/alloc kernel slot descriptors: {no-cache}
	 */
	[SKMEM_REGION_TXAKSD] = {
		.srp_name = "txaksd",
		.srp_id = SKMEM_REGION_TXAKSD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rx/free kernel slot descriptors: {no-cache}
	 */
	[SKMEM_REGION_RXFKSD] = {
		.srp_name = "rxfksd",
		.srp_id = SKMEM_REGION_RXFKSD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Statistics kernel snapshot: {no-cache}
	 */
	[SKMEM_REGION_KSTATS] = {
		.srp_name = "kstats",
		.srp_id = SKMEM_REGION_KSTATS,
		.srp_cflags = SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Intrinsic objects.
	 */
	[SKMEM_REGION_INTRINSIC] = {
		.srp_name = "intrinsic",
		.srp_id = SKMEM_REGION_INTRINSIC,
		.srp_cflags = SKMEM_REGION_CR_PSEUDO,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
};
360
/*
 * Region IDs that together make up a packet pool: buffers (shared,
 * Rx-only, Tx-only), kernel/user metadata, and kernel/user buflets.
 * The array is sized by SKMEM_PP_REGIONS, so the entry count here
 * must stay in sync with that constant.
 */
const skmem_region_id_t skmem_pp_region_ids[SKMEM_PP_REGIONS] = {
	SKMEM_REGION_BUF,
	SKMEM_REGION_RXBUF,
	SKMEM_REGION_TXBUF,
	SKMEM_REGION_KMD,
	SKMEM_REGION_RXKMD,
	SKMEM_REGION_TXKMD,
	SKMEM_REGION_UMD,
	SKMEM_REGION_KBFT,
	SKMEM_REGION_RXKBFT,
	SKMEM_REGION_TXKBFT,
	SKMEM_REGION_UBFT
};
374
/* CPU cache line (determined at runtime) */
static unsigned int cpu_cache_line_size;

/* lock attribute and group shared by the skmem subsystem */
LCK_ATTR_DECLARE(skmem_lock_attr, 0, 0);
LCK_GRP_DECLARE(skmem_lock_grp, "skmem");

#if (DEVELOPMENT || DEBUG)
/* kern.skywalk.mem sysctl node; development/debug kernels only */
SYSCTL_NODE(_kern_skywalk, OID_AUTO, mem, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Skywalk kmem");
#endif /* (DEVELOPMENT || DEBUG) */

/* size of the singleton system-wide sysctls object (bytes) */
#define SK_SYS_OBJSIZE_DEFAULT (16 * 1024)

/* system-wide sysctls region */
static struct skmem_region *sk_sys_region;
static void *sk_sys_obj;	/* singleton object from sk_sys_region */
static uint32_t sk_sys_objsize;	/* size of sk_sys_obj in bytes */

static void skmem_sys_region_init(void);
static void skmem_sys_region_fini(void);

/* lazily-allocated scratch buffer used by skmem_dump() */
static char *skmem_dump_buf;
#define SKMEM_DUMP_BUF_SIZE 2048 /* size of dump buffer */

/* nonzero once skmem_init() has completed */
static int __skmem_inited = 0;
400
/*
 * Bring up the skmem subsystem.  Must be called exactly once; the
 * layers are initialized bottom-up (arenas, cache pre-init, regions,
 * caches, packet pools) and skmem_fini() tears them down in reverse.
 */
void
skmem_init(void)
{
	ASSERT(!__skmem_inited);

	/* get CPU cache line size */
	(void) skmem_cpu_cache_line_size();

	skmem_arena_init();
	skmem_cache_pre_init();
	skmem_region_init();
	skmem_cache_init();
	pp_init();

	__skmem_inited = 1;

	/* set up system-wide region for sysctls */
	/* done after setting __skmem_inited, which it VERIFYs */
	skmem_sys_region_init();
}
420
421 void
skmem_fini(void)422 skmem_fini(void)
423 {
424 if (__skmem_inited) {
425 skmem_sys_region_fini();
426
427 pp_fini();
428 skmem_cache_fini();
429 skmem_region_fini();
430 skmem_arena_fini();
431
432 __skmem_inited = 0;
433 }
434 }
435
436 /*
437 * Return the default region parameters (template). Callers must never
438 * modify the returned region, and should treat it as invariant.
439 */
440 const struct skmem_region_params *
skmem_get_default(skmem_region_id_t id)441 skmem_get_default(skmem_region_id_t id)
442 {
443 ASSERT(id < SKMEM_REGIONS);
444 return &skmem_regions[id];
445 }
446
447 /*
448 * Return the CPU cache line size.
449 */
450 uint32_t
skmem_cpu_cache_line_size(void)451 skmem_cpu_cache_line_size(void)
452 {
453 if (__improbable(cpu_cache_line_size == 0)) {
454 ml_cpu_info_t cpu_info;
455 ml_cpu_get_info(&cpu_info);
456 cpu_cache_line_size = (uint32_t)cpu_info.cache_line_size;
457 ASSERT((SKMEM_PAGE_SIZE % cpu_cache_line_size) == 0);
458 }
459 return cpu_cache_line_size;
460 }
461
462 /*
463 * Dispatch a function to execute in a thread call.
464 */
465 void
skmem_dispatch(thread_call_t tcall,void (* func)(void),uint64_t delay)466 skmem_dispatch(thread_call_t tcall, void (*func)(void), uint64_t delay)
467 {
468 uint64_t now = mach_absolute_time();
469 uint64_t ival, deadline = now;
470
471 ASSERT(tcall != NULL);
472
473 if (delay == 0) {
474 delay = (10 * NSEC_PER_USEC); /* "immediately", 10 usec */
475 }
476 nanoseconds_to_absolutetime(delay, &ival);
477 clock_deadline_for_periodic_event(ival, now, &deadline);
478 (void) thread_call_enter1_delayed(tcall, func, deadline);
479 }
480
/*
 * Create the system-wide sysctls region and allocate its singleton
 * backing object, then hand it off to skmem_sysctl_init().  Called
 * once from skmem_init() after the rest of skmem is up.  Failure to
 * create the region or object is fatal (panics).
 */
static void
skmem_sys_region_init(void)
{
	struct skmem_region_params srp;

	VERIFY(__skmem_inited);
	VERIFY(sk_sys_region == NULL);

	/* start from the SYSCTLS template (we modify a local copy) */
	srp = *skmem_get_default(SKMEM_REGION_SYSCTLS);
	/* the template must carry exactly these creation flags */
	ASSERT((srp.srp_cflags & (SKMEM_REGION_CR_MMAPOK |
	    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
	    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_NOREDIRECT)) ==
	    (SKMEM_REGION_CR_MMAPOK | SKMEM_REGION_CR_UREADONLY |
	    SKMEM_REGION_CR_MONOLITHIC | SKMEM_REGION_CR_NOMAGAZINES |
	    SKMEM_REGION_CR_NOREDIRECT));

	/* one object of the default size backs all of the sysctls */
	srp.srp_r_obj_cnt = 1;
	srp.srp_r_obj_size = sk_sys_objsize = SK_SYS_OBJSIZE_DEFAULT;
	skmem_region_params_config(&srp);

	/* compile-time guarantee that skmem_sysctl fits in the object */
	_CASSERT(SK_SYS_OBJSIZE_DEFAULT >= sizeof(skmem_sysctl));
	sk_sys_region = skmem_region_create("global", &srp, NULL, NULL, NULL);
	if (sk_sys_region == NULL) {
		panic("failed to allocate global sysctls region");
		/* NOTREACHED */
		__builtin_unreachable();
	}

	sk_sys_obj = skmem_region_alloc(sk_sys_region, NULL, NULL,
	    NULL, SKMEM_SLEEP);
	if (sk_sys_obj == NULL) {
		panic("failed to allocate global sysctls object (%u bytes)",
		    sk_sys_objsize);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	skmem_sysctl_init();
}
520
521 static void
skmem_sys_region_fini(void)522 skmem_sys_region_fini(void)
523 {
524 if (sk_sys_region != NULL) {
525 skmem_region_free(sk_sys_region, sk_sys_obj, NULL);
526 sk_sys_obj = NULL;
527 skmem_region_release(sk_sys_region);
528 sk_sys_region = NULL;
529 }
530 VERIFY(sk_sys_obj == NULL);
531 }
532
533 struct skmem_region *
skmem_get_sysctls_region(void)534 skmem_get_sysctls_region(void)
535 {
536 return sk_sys_region;
537 }
538
539 void *
skmem_get_sysctls_obj(size_t * size)540 skmem_get_sysctls_obj(size_t *size)
541 {
542 if (size != NULL) {
543 *size = sk_sys_objsize;
544 }
545
546 return sk_sys_obj;
547 }
548
/*
 * VM page counters maintained by the pageout subsystem; referenced
 * read-only by skmem_dump() below.
 * (Fix: vm_page_inactive_count was previously declared twice in
 * this declarator list; the redundant duplicate is removed.)
 */
extern unsigned int vm_page_free_count, vm_page_speculative_count,
    vm_page_active_count, vm_page_inactive_count,
    vm_page_wire_count, vm_page_throttled_count, vm_lopage_free_count,
    vm_page_purgeable_count, vm_page_purged_count;
554
/*
 * Advance the dump cursor after an skmem_snprintf() call: subtract
 * the characters written (k) from the remaining space (clen), jump
 * to the "done" label when the buffer is exhausted, and otherwise
 * move the write pointer (c) forward.  Relies on locals c, clen, k
 * and a "done" label being in scope at the expansion site.
 */
#define SKMEM_WDT_DUMP_BUF_CHK() do { \
	clen -= k; \
	if (clen < 1) \
		goto done; \
	c += k; \
} while (0)
561
562 /*
563 * The compiler doesn't know that snprintf() supports %b format
564 * specifier, so use our own wrapper to vsnprintf() here instead.
565 */
566 #define skmem_snprintf(str, size, format, ...) ({ \
567 _Pragma("clang diagnostic push") \
568 _Pragma("clang diagnostic ignored \"-Wformat-invalid-specifier\"") \
569 _Pragma("clang diagnostic ignored \"-Wformat-extra-args\"") \
570 _Pragma("clang diagnostic ignored \"-Wformat\"") \
571 snprintf(str, size, format, ## __VA_ARGS__) \
572 _Pragma("clang diagnostic pop"); \
573 })
574
/*
 * Format diagnostic state for the given region (plus its slab cache,
 * if any, and global VM page counters) into the shared, lazily
 * allocated skmem_dump_buf, and return that buffer.  The buffer is
 * a single static allocation, so concurrent callers would race on
 * it — presumably only invoked from watchdog/debug paths; verify
 * before calling from elsewhere.
 */
__attribute__((noinline, cold, not_tail_called))
char *
skmem_dump(struct skmem_region *skr)
{
	int k, clen = SKMEM_DUMP_BUF_SIZE;
	struct skmem_cache *skm;
	char *c;

	/* allocate space for skmem_dump_buf */
	if (skmem_dump_buf == NULL) {
		skmem_dump_buf = (char *) kalloc_data(SKMEM_DUMP_BUF_SIZE,
		    (Z_ZERO | Z_WAITOK));
		VERIFY(skmem_dump_buf != NULL);
	} else {
		/* reuse the buffer from a previous dump; clear it first */
		bzero(skmem_dump_buf, SKMEM_DUMP_BUF_SIZE);
	}
	c = skmem_dump_buf;

	/* region header: mode bits, memory usage, alloc/free counters */
	k = skmem_snprintf(c, clen,
	    "Region %p\n"
	    " | Mode : 0x%b\n"
	    " | Memory : [%llu in use [%llu wired]] / [%llu total]\n"
	    " | Transactions : [%llu segment allocs, %llu frees]\n\n",
	    skr, skr->skr_mode, SKR_MODE_BITS, skr->skr_meminuse,
	    skr->skr_w_meminuse, skr->skr_memtotal, skr->skr_alloc,
	    skr->skr_free);
	SKMEM_WDT_DUMP_BUF_CHK();

	/* slab-backed regions also report their cache statistics */
	if ((skr->skr_mode & SKR_MODE_SLAB) && (skm = skr->skr_cache) != NULL) {
		k = skmem_snprintf(c, clen,
		    "Cache %p\n"
		    " | Mode : 0x%b\n"
		    " | Memory : [%llu in use] / [%llu total]\n"
		    " | Transactions : [%llu alloc failures]\n"
		    " | [%llu slab creates, %llu destroys]\n"
		    " | [%llu slab allocs, %llu frees]\n\n",
		    skm, skm->skm_mode, SKM_MODE_BITS, skm->skm_sl_bufinuse,
		    skm->skm_sl_bufmax, skm->skm_sl_alloc_fail,
		    skm->skm_sl_create, skm->skm_sl_destroy, skm->skm_sl_alloc,
		    skm->skm_sl_free);
		SKMEM_WDT_DUMP_BUF_CHK();
	}

	/* system-wide VM page counters for context */
	k = skmem_snprintf(c, clen,
	    "VM Pages\n"
	    " | Free : %u [%u speculative]\n"
	    " | Active : %u\n"
	    " | Inactive : %u\n"
	    " | Wired : %u [%u throttled, %u lopage_free]\n"
	    " | Purgeable : %u [%u purged]\n",
	    vm_page_free_count, vm_page_speculative_count,
	    vm_page_active_count, vm_page_inactive_count,
	    vm_page_wire_count, vm_page_throttled_count, vm_lopage_free_count,
	    vm_page_purgeable_count, vm_page_purged_count);
	SKMEM_WDT_DUMP_BUF_CHK();

done:
	/* reached early via SKMEM_WDT_DUMP_BUF_CHK() when buffer fills */
	return skmem_dump_buf;
}
634
635 boolean_t
skmem_lowmem_check(void)636 skmem_lowmem_check(void)
637 {
638 unsigned int plevel = kVMPressureNormal;
639 kern_return_t ret;
640
641 ret = mach_vm_pressure_level_monitor(false, &plevel);
642 if (ret == KERN_SUCCESS) {
643 /* kVMPressureCritical is the stage below jetsam */
644 if (plevel >= kVMPressureCritical) {
645 /*
646 * If we are in a low-memory situation, then we
647 * might want to start purging our caches.
648 */
649 return TRUE;
650 }
651 }
652 return FALSE;
653 }
654