1 /*
2 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <skywalk/os_skywalk_private.h>
30 #include <machine/limits.h>
31 #include <machine/machine_routines.h>
32 #include <vm/vm_pageout.h>
33
34 /*
35 * Region templates.
36 *
37 * Regions that are not eligible for user task mapping must never be
38 * marked with the SKMEM_REGION_CR_MMAPOK flag. Such regions will
39 * automatically be excluded from the mappable region array at arena
40 * creation time.
41 *
42 * Regions that allow their objects to be shared among other objects
43 * must be marked with SKMEM_REGION_CR_SHAREOK. This permits calls
44 * to skmem_bufctl_{use,unuse}() on the bufctls for the objects.
45 *
46 * Read-only regions must be marked with SKMEM_REGION_CR_UREADONLY.
47 * This will affect the protection property of the segments in those
48 * regions. This flag has no effect when the region is not mappable
49 * to a user task.
50 *
51 * The SKMEM_REGION_CR_NOMAGAZINES flag marks the region as unsupportive
52 * of the magazines layer when used by a skmem_cache. When this flag is
53 * not set, the number of objects in the region will be adjusted to
54 * include the worst-case number of objects cached at the CPU layer.
55 * By default, all regions have this flag set; this may be overridden
56 * by each client (after making a copy).
57 *
58 * Regions that don't support multi-segments can be marked with the
59 * SKMEM_REGION_CR_MONOLITHIC flag. This forces exactly one segment
60 * to cover all objects in the region. This also effectively caps
61 * the skmem_cache slab layer to have only a single slab.
62 *
63 * The correctness of the region templates is enforced at arena
64 * creation time.
65 */
static const struct skmem_region_params skmem_regions[SKMEM_REGIONS] = {
	/*
	 * Leading guard page(s): {mappable, no-read-write, no-cache}
	 */
	[SKMEM_REGION_GUARD_HEAD] = {
		.srp_name = "headguard",
		.srp_id = SKMEM_REGION_GUARD_HEAD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_GUARD | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Schema: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_SCHEMA] = {
		.srp_name = "schema",
		.srp_id = SKMEM_REGION_SCHEMA,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rings: {mappable, no-cache}
	 */
	[SKMEM_REGION_RING] = {
		.srp_name = "ring",
		.srp_id = SKMEM_REGION_RING,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Buffers: {mappable, shareable}
	 */
	/* bidirectional (IODIR_IN|IODIR_OUT) default-size buffers */
	[SKMEM_REGION_BUF_DEF] = {
		.srp_name = "buf_def",
		.srp_id = SKMEM_REGION_BUF_DEF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_IODIR_OUT | SKMEM_REGION_CR_SHAREOK |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	/* bidirectional large buffers; same flags as buf_def */
	[SKMEM_REGION_BUF_LARGE] = {
		.srp_name = "buf_large",
		.srp_id = SKMEM_REGION_BUF_LARGE,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_IODIR_OUT | SKMEM_REGION_CR_SHAREOK |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	/*
	 * Rx-only (IODIR_IN) variants; srp_r_obj_cnt starts at 0 and is
	 * presumably sized by the client at configuration time -- confirm.
	 */
	[SKMEM_REGION_RXBUF_DEF] = {
		.srp_name = "rxbuf_def",
		.srp_id = SKMEM_REGION_RXBUF_DEF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_RXBUF_LARGE] = {
		.srp_name = "rxbuf_large",
		.srp_id = SKMEM_REGION_RXBUF_LARGE,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_IN |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	/* Tx-only (IODIR_OUT) variants of the above */
	[SKMEM_REGION_TXBUF_DEF] = {
		.srp_name = "txbuf_def",
		.srp_id = SKMEM_REGION_TXBUF_DEF,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_OUT |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_TXBUF_LARGE] = {
		.srp_name = "txbuf_large",
		.srp_id = SKMEM_REGION_TXBUF_LARGE,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_IODIR_OUT |
		    SKMEM_REGION_CR_SHAREOK | SKMEM_REGION_CR_PUREDATA,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Userspace metadata: {mappable}
	 */
	[SKMEM_REGION_UMD] = {
		.srp_name = "umd",
		.srp_id = SKMEM_REGION_UMD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},

	/*
	 * Userspace buflet metadata: {mappable}
	 */
	[SKMEM_REGION_UBFT] = {
		.srp_name = "ubft",
		.srp_id = SKMEM_REGION_UBFT,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
		.srp_max_frags = 1,
	},

	/*
	 * Tx/alloc userspace slot descriptors: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_TXAUSD] = {
		.srp_name = "txausd",
		.srp_id = SKMEM_REGION_TXAUSD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rx/free userspace slot descriptors: {mappable, read-only, no-cache}
	 */
	[SKMEM_REGION_RXFUSD] = {
		.srp_name = "rxfusd",
		.srp_id = SKMEM_REGION_RXFUSD,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Shared statistics: {mappable, monolithic, no-cache}
	 */
	[SKMEM_REGION_USTATS] = {
		.srp_name = "ustats",
		.srp_id = SKMEM_REGION_USTATS,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_MONOLITHIC | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Flow advisories: {mappable, read-only, monolithic, no-cache}
	 */
	[SKMEM_REGION_FLOWADV] = {
		.srp_name = "flowadv",
		.srp_id = SKMEM_REGION_FLOWADV,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Nexus advisories: {mappable, read-only, monolithic, no-cache}
	 * Note: also CR_PERSISTENT, unlike the flow advisory region.
	 */
	[SKMEM_REGION_NEXUSADV] = {
		.srp_name = "nexusadv",
		.srp_id = SKMEM_REGION_NEXUSADV,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PERSISTENT |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * sysctls: {mappable, monolithic, no-cache}
	 */
	[SKMEM_REGION_SYSCTLS] = {
		.srp_name = "sysctls",
		.srp_id = SKMEM_REGION_SYSCTLS,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_NOREDIRECT |
		    SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Trailing guard page(s): {mappable, no-read-write, no-cache}
	 */
	[SKMEM_REGION_GUARD_TAIL] = {
		.srp_name = "tailguard",
		.srp_id = SKMEM_REGION_GUARD_TAIL,
		.srp_cflags = SKMEM_REGION_CR_MMAPOK |
		    SKMEM_REGION_CR_GUARD | SKMEM_REGION_CR_NOMAGAZINES |
		    SKMEM_REGION_CR_NOREDIRECT,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Kernel metadata.  Not mappable to user tasks (no CR_MMAPOK).
	 */
	[SKMEM_REGION_KMD] = {
		.srp_name = "kmd",
		.srp_id = SKMEM_REGION_KMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},
	/* split Rx/Tx kernel metadata; counts configured by client */
	[SKMEM_REGION_RXKMD] = {
		.srp_name = "rxkmd",
		.srp_id = SKMEM_REGION_RXKMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},
	[SKMEM_REGION_TXKMD] = {
		.srp_name = "txkmd",
		.srp_id = SKMEM_REGION_TXKMD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_QUANTUM,
		.srp_md_subtype = NEXUS_META_SUBTYPE_PAYLOAD,
		.srp_max_frags = 1,
	},

	/*
	 * kernel buflet metadata.  Not mappable to user tasks.
	 */
	[SKMEM_REGION_KBFT] = {
		.srp_name = "kbft",
		.srp_id = SKMEM_REGION_KBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_RXKBFT] = {
		.srp_name = "rxkbft",
		.srp_id = SKMEM_REGION_RXKBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
	[SKMEM_REGION_TXKBFT] = {
		.srp_name = "txkbft",
		.srp_id = SKMEM_REGION_TXKBFT,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_r_obj_cnt = 0,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Tx/alloc kernel slot descriptors: {no-cache}
	 */
	[SKMEM_REGION_TXAKSD] = {
		.srp_name = "txaksd",
		.srp_id = SKMEM_REGION_TXAKSD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Rx/free kernel slot descriptors: {no-cache}
	 */
	[SKMEM_REGION_RXFKSD] = {
		.srp_name = "rxfksd",
		.srp_id = SKMEM_REGION_RXFKSD,
		.srp_cflags = SKMEM_REGION_CR_NOMAGAZINES,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Statistics kernel snapshot: {no-cache}
	 */
	[SKMEM_REGION_KSTATS] = {
		.srp_name = "kstats",
		.srp_id = SKMEM_REGION_KSTATS,
		.srp_cflags = SKMEM_REGION_CR_MONOLITHIC |
		    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_PUREDATA,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},

	/*
	 * Intrinsic objects.
	 */
	[SKMEM_REGION_INTRINSIC] = {
		.srp_name = "intrinsic",
		.srp_id = SKMEM_REGION_INTRINSIC,
		.srp_cflags = SKMEM_REGION_CR_PSEUDO,
		.srp_md_type = NEXUS_META_TYPE_INVALID,
		.srp_md_subtype = NEXUS_META_SUBTYPE_INVALID,
	},
};
390
/*
 * Region IDs that make up a packet pool; the array must contain
 * exactly SKMEM_PP_REGIONS entries.  NOTE(review): consumers likely
 * depend on this ordering -- confirm before reordering entries.
 */
const skmem_region_id_t skmem_pp_region_ids[SKMEM_PP_REGIONS] = {
	SKMEM_REGION_BUF_DEF,
	SKMEM_REGION_BUF_LARGE,
	SKMEM_REGION_RXBUF_DEF,
	SKMEM_REGION_RXBUF_LARGE,
	SKMEM_REGION_TXBUF_DEF,
	SKMEM_REGION_TXBUF_LARGE,
	SKMEM_REGION_KMD,
	SKMEM_REGION_RXKMD,
	SKMEM_REGION_TXKMD,
	SKMEM_REGION_UMD,
	SKMEM_REGION_KBFT,
	SKMEM_REGION_RXKBFT,
	SKMEM_REGION_TXKBFT,
	SKMEM_REGION_UBFT
};
407
/* CPU cache line (determined at runtime) */
static unsigned int cpu_cache_line_size;

/* lock attribute and group shared by skmem locks */
LCK_ATTR_DECLARE(skmem_lock_attr, 0, 0);
LCK_GRP_DECLARE(skmem_lock_grp, "skmem");

#if (DEVELOPMENT || DEBUG)
SYSCTL_NODE(_kern_skywalk, OID_AUTO, mem, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "Skywalk kmem");
#endif /* (DEVELOPMENT || DEBUG) */

/* size of the single object backing the global sysctls region */
#define SK_SYS_OBJSIZE_DEFAULT (16 * 1024)

/* system-wide sysctls region */
static struct skmem_region *sk_sys_region;
static void *sk_sys_obj;        /* the lone object allocated from it */
static uint32_t sk_sys_objsize; /* its size in bytes */

static void skmem_sys_region_init(void);
static void skmem_sys_region_fini(void);

/* lazily allocated by skmem_dump() */
static char *skmem_dump_buf;
#define SKMEM_DUMP_BUF_SIZE 2048 /* size of dump buffer */

/* nonzero once skmem_init() has completed */
static int __skmem_inited = 0;
433
/*
 * One-time initialization of the skmem subsystem.  Must run before any
 * other skmem interface is used; the call order below is significant,
 * as each layer builds on the one initialized before it.
 */
void
skmem_init(void)
{
	ASSERT(!__skmem_inited);

	/* get CPU cache line size */
	(void) skmem_cpu_cache_line_size();

	/* bottom-up: cache pre-init, regions, caches, then packet pools */
	skmem_cache_pre_init();
	skmem_region_init();
	skmem_cache_init();
	pp_init();

	__skmem_inited = 1;

	/* set up system-wide region for sysctls */
	skmem_sys_region_init();
}
452
/*
 * Tear down the skmem subsystem; inverse of skmem_init().
 * A no-op if skmem_init() never completed.
 */
void
skmem_fini(void)
{
	if (__skmem_inited) {
		/* release the global sysctls region first */
		skmem_sys_region_fini();

		/* strict reverse order of skmem_init() */
		pp_fini();
		skmem_cache_fini();
		skmem_region_fini();

		__skmem_inited = 0;
	}
}
466
467 /*
468 * Return the default region parameters (template). Callers must never
469 * modify the returned region, and should treat it as invariant.
470 */
471 const struct skmem_region_params *
skmem_get_default(skmem_region_id_t id)472 skmem_get_default(skmem_region_id_t id)
473 {
474 ASSERT(id < SKMEM_REGIONS);
475 return &skmem_regions[id];
476 }
477
478 /*
479 * Return the CPU cache line size.
480 */
481 uint32_t
skmem_cpu_cache_line_size(void)482 skmem_cpu_cache_line_size(void)
483 {
484 if (__improbable(cpu_cache_line_size == 0)) {
485 ml_cpu_info_t cpu_info;
486 ml_cpu_get_info(&cpu_info);
487 cpu_cache_line_size = (uint32_t)cpu_info.cache_line_size;
488 ASSERT((SKMEM_PAGE_SIZE % cpu_cache_line_size) == 0);
489 }
490 return cpu_cache_line_size;
491 }
492
493 /*
494 * Dispatch a function to execute in a thread call.
495 */
496 void
skmem_dispatch(thread_call_t tcall,void (* func)(void),uint64_t delay)497 skmem_dispatch(thread_call_t tcall, void (*func)(void), uint64_t delay)
498 {
499 uint64_t now = mach_absolute_time();
500 uint64_t ival, deadline = now;
501
502 ASSERT(tcall != NULL);
503
504 if (delay == 0) {
505 delay = (10 * NSEC_PER_USEC); /* "immediately", 10 usec */
506 }
507 nanoseconds_to_absolutetime(delay, &ival);
508 clock_deadline_for_periodic_event(ival, now, &deadline);
509 (void) thread_call_enter1_delayed(tcall, func, deadline);
510 }
511
/*
 * Create the system-wide sysctls region from the SKMEM_REGION_SYSCTLS
 * template, allocate its single backing object, and hand control to
 * skmem_sysctl_init().  Called exactly once, at the end of skmem_init().
 * Allocation failures are fatal (panic).
 */
static void
skmem_sys_region_init(void)
{
	struct skmem_region_params srp;

	VERIFY(__skmem_inited);
	VERIFY(sk_sys_region == NULL);

	/* work on a private copy of the invariant template */
	srp = *skmem_get_default(SKMEM_REGION_SYSCTLS);
	/* the template must carry exactly this set of properties */
	ASSERT((srp.srp_cflags & (SKMEM_REGION_CR_MMAPOK |
	    SKMEM_REGION_CR_UREADONLY | SKMEM_REGION_CR_MONOLITHIC |
	    SKMEM_REGION_CR_NOMAGAZINES | SKMEM_REGION_CR_NOREDIRECT)) ==
	    (SKMEM_REGION_CR_MMAPOK | SKMEM_REGION_CR_UREADONLY |
	    SKMEM_REGION_CR_MONOLITHIC | SKMEM_REGION_CR_NOMAGAZINES |
	    SKMEM_REGION_CR_NOREDIRECT));

	/* one monolithic object backs the entire region */
	srp.srp_r_obj_cnt = 1;
	srp.srp_r_obj_size = sk_sys_objsize = SK_SYS_OBJSIZE_DEFAULT;
	skmem_region_params_config(&srp);

	/* compile-time: skmem_sysctl must fit within the object */
	_CASSERT(SK_SYS_OBJSIZE_DEFAULT >= sizeof(skmem_sysctl));
	sk_sys_region = skmem_region_create("global", &srp, NULL, NULL, NULL);
	if (sk_sys_region == NULL) {
		panic("failed to allocate global sysctls region");
		/* NOTREACHED */
		__builtin_unreachable();
	}

	sk_sys_obj = skmem_region_alloc(sk_sys_region, NULL, NULL,
	    NULL, SKMEM_SLEEP);
	if (sk_sys_obj == NULL) {
		panic("failed to allocate global sysctls object (%u bytes)",
		    sk_sys_objsize);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	skmem_sysctl_init();
}
551
552 static void
skmem_sys_region_fini(void)553 skmem_sys_region_fini(void)
554 {
555 if (sk_sys_region != NULL) {
556 skmem_region_free(sk_sys_region, sk_sys_obj, NULL);
557 sk_sys_obj = NULL;
558 skmem_region_release(sk_sys_region);
559 sk_sys_region = NULL;
560 }
561 VERIFY(sk_sys_obj == NULL);
562 }
563
564 struct skmem_region *
skmem_get_sysctls_region(void)565 skmem_get_sysctls_region(void)
566 {
567 return sk_sys_region;
568 }
569
570 void *
skmem_get_sysctls_obj(size_t * size)571 skmem_get_sysctls_obj(size_t *size)
572 {
573 if (size != NULL) {
574 *size = sk_sys_objsize;
575 }
576
577 return sk_sys_obj;
578 }
579
/*
 * For VM stats reported by skmem_dump(); counters are defined by the
 * VM layer.  (vm_page_inactive_count was previously declared twice --
 * the redundant duplicate has been dropped.)
 */
extern unsigned int vm_page_free_count, vm_page_speculative_count,
    vm_page_active_count, vm_page_inactive_count,
    vm_page_wire_count, vm_page_throttled_count, vm_lopage_free_count,
    vm_page_purgeable_count, vm_page_purged_count;
585
/*
 * Advance the dump cursor past the 'k' bytes just formatted, bailing
 * out once the buffer is exhausted.  Relies on locals c (cursor),
 * clen (bytes remaining), k (last snprintf result) and a done: label
 * in the enclosing function.
 */
#define SKMEM_WDT_DUMP_BUF_CHK() do { \
	clen -= k; \
	if (clen < 1) \
		goto done; \
	c += k; \
} while (0)

/*
 * The compiler doesn't know that snprintf() supports %b format
 * specifier, so use our own wrapper to vsnprintf() here instead.
 * The pragmas suppress -Wformat diagnostics for the %b uses below.
 */
#define skmem_snprintf(str, size, format, ...) ({ \
	_Pragma("clang diagnostic push") \
	_Pragma("clang diagnostic ignored \"-Wformat-invalid-specifier\"") \
	_Pragma("clang diagnostic ignored \"-Wformat-extra-args\"") \
	_Pragma("clang diagnostic ignored \"-Wformat\"") \
	snprintf(str, size, format, ## __VA_ARGS__) \
	_Pragma("clang diagnostic pop"); \
})
605
/*
 * Format a human-readable snapshot of the given region (plus, for slab
 * regions, each of its caches) and global VM page counters into a
 * lazily allocated shared buffer, returning that buffer.  Output is
 * silently truncated at SKMEM_DUMP_BUF_SIZE.  Uses one static buffer,
 * so concurrent callers would race -- intended for debug/watchdog use.
 */
__attribute__((noinline, cold, not_tail_called))
char *
skmem_dump(struct skmem_region *skr)
{
	int k, clen = SKMEM_DUMP_BUF_SIZE;
	struct skmem_cache *skm;
	char *c;

	/* allocate space for skmem_dump_buf */
	if (skmem_dump_buf == NULL) {
		skmem_dump_buf = (char *) kalloc_data(SKMEM_DUMP_BUF_SIZE,
		    (Z_ZERO | Z_WAITOK));
		VERIFY(skmem_dump_buf != NULL);
	} else {
		bzero(skmem_dump_buf, SKMEM_DUMP_BUF_SIZE);
	}
	c = skmem_dump_buf;

	/* region-level counters; %b expands the mode bits symbolically */
	k = skmem_snprintf(c, clen,
	    "Region %p\n"
	    " | Mode : 0x%b\n"
	    " | Memory : [%llu in use [%llu wired]] / [%llu total]\n"
	    " | Transactions : [%llu segment allocs, %llu frees]\n\n",
	    skr, skr->skr_mode, SKR_MODE_BITS, skr->skr_meminuse,
	    skr->skr_w_meminuse, skr->skr_memtotal, skr->skr_alloc,
	    skr->skr_free);
	SKMEM_WDT_DUMP_BUF_CHK();

	/* for slab-backed regions, append one section per attached cache */
	if (skr->skr_mode & SKR_MODE_SLAB) {
		for (int i = 0; i < SKR_MAX_CACHES; i++) {
			if ((skm = skr->skr_cache[i]) == NULL) {
				continue;
			}
			k = skmem_snprintf(c, clen, "Cache %p\n"
			    " | Mode : 0x%b\n"
			    " | Memory : [%llu in use] / [%llu total]\n"
			    " | Transactions : [%llu alloc failures]\n"
			    " | [%llu slab creates, %llu destroys]\n"
			    " | [%llu slab allocs, %llu frees]\n\n",
			    skm, skm->skm_mode, SKM_MODE_BITS,
			    skm->skm_sl_bufinuse, skm->skm_sl_bufmax,
			    skm->skm_sl_alloc_fail, skm->skm_sl_create,
			    skm->skm_sl_destroy, skm->skm_sl_alloc,
			    skm->skm_sl_free);
			SKMEM_WDT_DUMP_BUF_CHK();
		}
	}

	/* global VM page counters (externs from the VM layer) */
	k = skmem_snprintf(c, clen,
	    "VM Pages\n"
	    " | Free : %u [%u speculative]\n"
	    " | Active : %u\n"
	    " | Inactive : %u\n"
	    " | Wired : %u [%u throttled, %u lopage_free]\n"
	    " | Purgeable : %u [%u purged]\n",
	    vm_page_free_count, vm_page_speculative_count,
	    vm_page_active_count, vm_page_inactive_count,
	    vm_page_wire_count, vm_page_throttled_count, vm_lopage_free_count,
	    vm_page_purgeable_count, vm_page_purged_count);
	SKMEM_WDT_DUMP_BUF_CHK();

done:
	return skmem_dump_buf;
}
670
671 boolean_t
skmem_lowmem_check(void)672 skmem_lowmem_check(void)
673 {
674 unsigned int plevel = kVMPressureNormal;
675 kern_return_t ret;
676
677 ret = mach_vm_pressure_level_monitor(false, &plevel);
678 if (ret == KERN_SUCCESS) {
679 /* kVMPressureCritical is the stage below jetsam */
680 if (plevel >= kVMPressureCritical) {
681 /*
682 * If we are in a low-memory situation, then we
683 * might want to start purging our caches.
684 */
685 return TRUE;
686 }
687 }
688 return FALSE;
689 }
690