/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * File: kern/gzalloc.c
 * Author: Derek Kumar
 *
 * "Guard mode" zone allocator, used to trap use-after-free errors,
 * overruns, underruns, mismatched allocations/frees, uninitialized
 * zone element use, timing-dependent races, etc.
 *
 * The allocator is configured by these boot-args:
 * gzalloc_size=<size>: target all zones with elements of <size> bytes
 * gzalloc_min=<size>: target zones with elements >= size
 * gzalloc_max=<size>: target zones with elements <= size
 * gzalloc_min/max can be specified in conjunction to target a range of
 * sizes
 * gzalloc_fc_size=<size>: number of zone elements (effectively page
 * multiple sized) to retain in the free VA cache. This cache is evicted
 * (backing pages and VA released) in a least-recently-freed fashion.
 * Larger free VA caches allow for a longer window of opportunity to trap
 * delayed use-after-free operations, but use more memory.
 * -gzalloc_wp: write protect, rather than unmap, freed allocations
 * lingering in the free VA cache. Useful to disambiguate between
 * read-after-frees/read overruns and writes. Also permits direct inspection
 * of the freed element in the cache via the kernel debugger. As each
 * element has a "header" (trailer in underflow detection mode), the zone
 * of origin of the element can be easily determined in this mode.
 * -gzalloc_uf_mode: underflow detection mode, where the guard page
 * adjoining each element is placed *before* the element page rather than
 * after. The element is also located at the top of the page, rather than
 * abutting the bottom as with the standard overflow detection mode.
 * -gzalloc_noconsistency: disable consistency checks that flag mismatched
 * frees, corruptions of the header/trailer signatures, etc.
 * -nogzalloc_mode: disable the guard mode allocator. The DEBUG kernel
 * enables the guard allocator for zones sized 1K (if present) by
 * default; this option disables that behaviour.
 * gzname=<name>: target a zone by name. Can be coupled with size-based
 * targeting. Naming conventions match those of the zlog boot-arg, i.e.
 * "a period in the logname will match a space in the zone name"
 * -gzalloc_no_dfree_check: eliminate double free checks
 * gzalloc_zscale=<value>: specify the size multiplier for the dedicated
 * gzalloc submap
 */
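
/*
 * Example (illustrative values only, not a recommended configuration): to
 * guard-protect every allocation between 512 and 2048 bytes, keep 4096
 * freed elements write-protected in the free VA cache, and additionally
 * target the "vm pages" zone by name, one might boot with:
 *
 *   gzalloc_min=512 gzalloc_max=2048 gzalloc_fc_size=4096 -gzalloc_wp gzname=vm.pages
 */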

#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <mach/vm_map.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/sched.h>
#include <kern/locks.h>
#include <kern/misc_protos.h>
#include <kern/zalloc_internal.h>

#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_page.h>

#include <pexpert/pexpert.h>

#include <machine/machparam.h>

#include <libkern/OSDebug.h>
#include <libkern/OSAtomic.h>
#include <sys/kdebug.h>

boolean_t gzalloc_mode = FALSE;
uint32_t pdzalloc_count, pdzfree_count;

#define GZALLOC_MIN_DEFAULT (1024)
#define GZDEADZONE ((zone_t) 0xDEAD201E)
#define GZALLOC_SIGNATURE (0xABADCAFE)
#define GZALLOC_RESERVE_SIZE_DEFAULT (2 * 1024 * 1024)
#define GZFC_DEFAULT_SIZE (1536)

char gzalloc_fill_pattern = 0x67; /* 'g' */

uint32_t gzalloc_min = ~0U;
uint32_t gzalloc_max = 0;
uint32_t gzalloc_size = 0;
uint64_t gzalloc_allocated, gzalloc_freed, gzalloc_early_alloc, gzalloc_early_free, gzalloc_wasted;
boolean_t gzalloc_uf_mode = FALSE, gzalloc_consistency_checks = TRUE, gzalloc_dfree_check = TRUE;
vm_prot_t gzalloc_prot = VM_PROT_NONE;
uint32_t gzalloc_guard = KMA_GUARD_LAST;
uint32_t gzfc_size = GZFC_DEFAULT_SIZE;
uint32_t gzalloc_zonemap_scale = 6;

vm_map_t gzalloc_map;
vm_offset_t gzalloc_map_min, gzalloc_map_max;
vm_offset_t gzalloc_reserve;
vm_size_t gzalloc_reserve_size;

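/*
 * Metadata recorded alongside each live element: the zone of origin, the
 * element size at allocation time, and a signature used to detect header
 * corruption. In the standard (overflow) mode the header immediately
 * precedes the returned element; in underflow mode it trails it.
 */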
typedef struct gzalloc_header {
	zone_t gzone;
	uint32_t gzsize;
	uint32_t gzsig;
} gzhdr_t;

#define GZHEADER_SIZE (sizeof(gzhdr_t))

extern zone_t vm_page_zone;

static zone_t gztrackzone = NULL;
static char gznamedzone[MAX_ZONE_NAME] = "";

boolean_t
gzalloc_enabled(void)
{
	return gzalloc_mode;
}

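/*
 * Called as each zone is initialized: decides whether gzalloc should track
 * the zone (by name, or by element size range) and, if so, sets up its
 * protected free element cache.
 */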
void
gzalloc_zone_init(zone_t z)
{
	if (gzalloc_mode == 0) {
		return;
	}

	bzero(&z->gz, sizeof(z->gz));

	if (track_this_zone(z->z_name, gznamedzone)) {
		gztrackzone = z;
	}

	z->z_gzalloc_tracked = (z == gztrackzone) ||
	    ((zone_elem_size(z) >= gzalloc_min) && (zone_elem_size(z) <= gzalloc_max));

	if (gzfc_size && z->z_gzalloc_tracked) {
		vm_size_t gzfcsz = round_page(sizeof(*z->gz.gzfc) * gzfc_size);
		kern_return_t kr;

		/* If the VM/kmem system aren't yet configured, carve
		 * out the free element cache structure directly from the
		 * gzalloc_reserve supplied by the pmap layer.
		 */
		if (__improbable(startup_phase < STARTUP_SUB_KMEM)) {
			if (gzalloc_reserve_size < gzfcsz) {
				panic("gzalloc reserve exhausted");
			}

			z->gz.gzfc = (vm_offset_t *)gzalloc_reserve;
			gzalloc_reserve += gzfcsz;
			gzalloc_reserve_size -= gzfcsz;
			bzero(z->gz.gzfc, gzfcsz);
		} else {
			kr = kernel_memory_allocate(kernel_map,
			    (vm_offset_t *)&z->gz.gzfc, gzfcsz, 0,
			    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_OSFMK);
			if (kr != KERN_SUCCESS) {
				panic("%s: kernel_memory_allocate failed (%d) for 0x%lx bytes",
				    __func__, kr, (unsigned long)gzfcsz);
			}
		}
	}
}

/* Called by zdestroy() to dump the free cache elements so the zone count can drop to zero. */
void
gzalloc_empty_free_cache(zone_t zone)
{
	kern_return_t kr;
	int freed_elements = 0;
	vm_offset_t free_addr = 0;
	vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE);
	vm_offset_t gzfcsz = round_page(sizeof(*zone->gz.gzfc) * gzfc_size);
	vm_offset_t gzfc_copy;

	assert(zone->z_gzalloc_tracked); // the caller is responsible for checking

	kr = kmem_alloc(kernel_map, &gzfc_copy, gzfcsz, VM_KERN_MEMORY_OSFMK);
	if (kr != KERN_SUCCESS) {
		panic("gzalloc_empty_free_cache: kmem_alloc: 0x%x", kr);
	}

	/* Reset gzalloc_data. */
	zone_lock(zone);
	memcpy((void *)gzfc_copy, (void *)zone->gz.gzfc, gzfcsz);
	bzero((void *)zone->gz.gzfc, gzfcsz);
	zone->gz.gzfc_index = 0;
	zone_unlock(zone);

	/* Free up all the cached elements. */
	for (uint32_t index = 0; index < gzfc_size; index++) {
		free_addr = ((vm_offset_t *)gzfc_copy)[index];
		if (free_addr && free_addr >= gzalloc_map_min && free_addr < gzalloc_map_max) {
			kr = vm_map_remove(gzalloc_map, free_addr,
			    free_addr + rounded_size + (1 * PAGE_SIZE),
			    VM_MAP_REMOVE_KUNWIRE);
			if (kr != KERN_SUCCESS) {
				panic("gzalloc_empty_free_cache: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
			}
			OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
			OSAddAtomic64(-((SInt32) (rounded_size - zone_elem_size(zone))), &gzalloc_wasted);

			freed_elements++;
		}
	}
	/*
	 * TODO: Consider freeing up zone->gz.gzfc as well if it didn't come from the gzalloc_reserve pool.
	 * For now we're reusing this buffer across zdestroy's. We would have to allocate it again on a
	 * subsequent zinit() as well.
	 */

	/* Update the zone counters to reflect the released elements. */
	zone_lock(zone);
	zone->z_elems_free += freed_elements;
	zone->z_wired_cur -= freed_elements;
	zone_unlock(zone);

	kmem_free(kernel_map, gzfc_copy, gzfcsz);
}

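/*
 * Parse the gzalloc boot-args and, if guard mode is enabled, steal the
 * early-boot reserve from the pmap layer. Runs during the PMAP_STEAL
 * startup phase, before kmem is available.
 */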
__startup_func
static void
gzalloc_configure(void)
{
#if !KASAN_ZALLOC
	char temp_buf[16];

	if (PE_parse_boot_argn("-gzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = TRUE;
		gzalloc_min = GZALLOC_MIN_DEFAULT;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_min", &gzalloc_min, sizeof(gzalloc_min))) {
		gzalloc_mode = TRUE;
		gzalloc_max = ~0U;
	}

	if (PE_parse_boot_argn("gzalloc_max", &gzalloc_max, sizeof(gzalloc_max))) {
		gzalloc_mode = TRUE;
		if (gzalloc_min == ~0U) {
			gzalloc_min = 0;
		}
	}

	if (PE_parse_boot_argn("gzalloc_size", &gzalloc_size, sizeof(gzalloc_size))) {
		gzalloc_min = gzalloc_max = gzalloc_size;
		gzalloc_mode = TRUE;
	}

	(void)PE_parse_boot_argn("gzalloc_fc_size", &gzfc_size, sizeof(gzfc_size));

	if (PE_parse_boot_argn("-gzalloc_wp", temp_buf, sizeof(temp_buf))) {
		gzalloc_prot = VM_PROT_READ;
	}

	if (PE_parse_boot_argn("-gzalloc_uf_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_uf_mode = TRUE;
		gzalloc_guard = KMA_GUARD_FIRST;
	}

	if (PE_parse_boot_argn("-gzalloc_no_dfree_check", temp_buf, sizeof(temp_buf))) {
		gzalloc_dfree_check = FALSE;
	}

	(void)PE_parse_boot_argn("gzalloc_zscale", &gzalloc_zonemap_scale, sizeof(gzalloc_zonemap_scale));

	if (PE_parse_boot_argn("-gzalloc_noconsistency", temp_buf, sizeof(temp_buf))) {
		gzalloc_consistency_checks = FALSE;
	}

	if (PE_parse_boot_argn("gzname", gznamedzone, sizeof(gznamedzone))) {
		gzalloc_mode = TRUE;
	}
#if DEBUG
	if (gzalloc_mode == FALSE) {
		gzalloc_min = 1024;
		gzalloc_max = 1024;
		strlcpy(gznamedzone, "pmap", sizeof(gznamedzone));
		gzalloc_prot = VM_PROT_READ;
		gzalloc_mode = TRUE;
	}
#endif
	if (PE_parse_boot_argn("-nogzalloc_mode", temp_buf, sizeof(temp_buf))) {
		gzalloc_mode = FALSE;
	}

	if (gzalloc_mode) {
		gzalloc_reserve_size = GZALLOC_RESERVE_SIZE_DEFAULT;
		gzalloc_reserve = (vm_offset_t) pmap_steal_memory(gzalloc_reserve_size);
	}
#endif
}
STARTUP(PMAP_STEAL, STARTUP_RANK_FIRST, gzalloc_configure);

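/*
 * Create the dedicated gzalloc submap, scaled up from the maximum zone map
 * size so the per-element guard pages and lingering free VA can be
 * accommodated.
 */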
void
gzalloc_init(vm_size_t max_zonemap_size)
{
	kern_return_t retval;

	if (gzalloc_mode) {
		vm_map_kernel_flags_t vmk_flags;

		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_permanent = TRUE;
		retval = kmem_suballoc(kernel_map, &gzalloc_map_min,
		    (max_zonemap_size * gzalloc_zonemap_scale),
		    VM_MAP_CREATE_DEFAULT, VM_FLAGS_ANYWHERE, vmk_flags,
		    VM_KERN_MEMORY_ZONE, &gzalloc_map);

		if (retval != KERN_SUCCESS) {
			panic("%s: kmem_suballoc(gzalloc_map, 0x%lx, %u) failed",
			    __func__, max_zonemap_size, gzalloc_zonemap_scale);
		}
		gzalloc_map_max = gzalloc_map_min + (max_zonemap_size * gzalloc_zonemap_scale);
	}
}

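/*
 * Allocate one guarded element for `zone`: the element and its metadata
 * header occupy whole pages adjoining a guard page, so an overrun (or
 * underrun, in -gzalloc_uf_mode) faults immediately.
 */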
vm_offset_t
gzalloc_alloc(zone_t zone, zone_stats_t zstats, zalloc_flags_t flags)
{
	vm_offset_t addr = 0;

	assert(zone->z_gzalloc_tracked); // the caller is responsible for checking

	if (get_preemption_level() != 0) {
		if (flags & Z_NOWAIT) {
			return 0;
		}
		pdzalloc_count++;
	}

	bool kmem_ready = (startup_phase >= STARTUP_SUB_KMEM);
	vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE);
	vm_offset_t residue = rounded_size - zone_elem_size(zone);
	vm_offset_t gzaddr = 0;
	gzhdr_t *gzh, *gzhcopy = NULL;
	bool new_va = false;

	if (!kmem_ready || (vm_page_zone == ZONE_NULL)) {
		/* Early allocations are supplied directly from the
		 * reserve.
		 */
		if (gzalloc_reserve_size < (rounded_size + PAGE_SIZE)) {
			panic("gzalloc reserve exhausted");
		}
		gzaddr = gzalloc_reserve;
		/* No guard page for these early allocations, just
		 * waste an additional page.
		 */
		gzalloc_reserve += rounded_size + PAGE_SIZE;
		gzalloc_reserve_size -= rounded_size + PAGE_SIZE;
		OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_alloc);
	} else {
		kern_return_t kr = kernel_memory_allocate(gzalloc_map,
		    &gzaddr, rounded_size + (1 * PAGE_SIZE),
		    0, KMA_KOBJECT | KMA_ATOMIC | gzalloc_guard,
		    VM_KERN_MEMORY_OSFMK);
		if (kr != KERN_SUCCESS) {
			panic("gzalloc: kernel_memory_allocate for size 0x%llx failed with %d",
			    (uint64_t)rounded_size, kr);
		}
		new_va = true;
	}
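
	/*
	 * Resulting layouts (one guard page per allocation):
	 *
	 * standard (overflow) mode:
	 *   [ slack ... | gzhdr_t | element ][ guard page ]
	 * underflow mode:
	 *   [ guard page ][ element | gzhdr_t (footer) | ... gzhdr_t copy ]
	 */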
	if (gzalloc_uf_mode) {
		gzaddr += PAGE_SIZE;
		/* The "header" becomes a "footer" in underflow
		 * mode.
		 */
		gzh = (gzhdr_t *) (gzaddr + zone_elem_size(zone));
		addr = gzaddr;
		gzhcopy = (gzhdr_t *) (gzaddr + rounded_size - sizeof(gzhdr_t));
	} else {
		gzh = (gzhdr_t *) (gzaddr + residue - GZHEADER_SIZE);
		addr = (gzaddr + residue);
	}

	/*
	 * All zone allocations are always zeroed
	 */
	bzero((void *)gzaddr, rounded_size);

	gzh->gzone = (kmem_ready && vm_page_zone) ? zone : GZDEADZONE;
	gzh->gzsize = (uint32_t)zone_elem_size(zone);
	gzh->gzsig = GZALLOC_SIGNATURE;

	/* In underflow detection mode, stash away a copy of the
	 * metadata at the edge of the allocated range, for
	 * retrieval by gzalloc_element_size()
	 */
	if (gzhcopy) {
		*gzhcopy = *gzh;
	}

	zone_lock(zone);
	assert(zone->z_self == zone);
	zone->z_elems_free--;
	if (new_va) {
		zone->z_va_cur += 1;
	}
	zone->z_wired_cur += 1;
	zpercpu_get(zstats)->zs_mem_allocated += rounded_size;
	zone_unlock(zone);

	OSAddAtomic64((SInt32) rounded_size, &gzalloc_allocated);
	OSAddAtomic64((SInt32) (rounded_size - zone_elem_size(zone)), &gzalloc_wasted);

	return addr;
}

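/*
 * Free a guarded element: validate its metadata, then either unmap it (or
 * write protect it, with -gzalloc_wp) and rotate it through the protected
 * free element cache so that delayed use-after-frees trap.
 */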
void
gzalloc_free(zone_t zone, zone_stats_t zstats, void *addr)
{
	kern_return_t kr;

	assert(zone->z_gzalloc_tracked); // the caller is responsible for checking

	gzhdr_t *gzh;
	vm_offset_t rounded_size = round_page(zone_elem_size(zone) + GZHEADER_SIZE);
	vm_offset_t residue = rounded_size - zone_elem_size(zone);
	vm_offset_t saddr;
	vm_offset_t free_addr = 0;

	if (gzalloc_uf_mode) {
		gzh = (gzhdr_t *)((vm_offset_t)addr + zone_elem_size(zone));
		saddr = (vm_offset_t) addr - PAGE_SIZE;
	} else {
		gzh = (gzhdr_t *)((vm_offset_t)addr - GZHEADER_SIZE);
		saddr = ((vm_offset_t)addr) - residue;
	}

	if ((saddr & PAGE_MASK) != 0) {
		panic("%s: invalid address supplied: "
		    "%p (adjusted: 0x%lx) for zone with element sized 0x%lx\n",
		    __func__, addr, saddr, zone_elem_size(zone));
	}

	if (gzfc_size && gzalloc_dfree_check) {
		zone_lock(zone);
		assert(zone->z_self == zone);
		for (uint32_t gd = 0; gd < gzfc_size; gd++) {
			if (zone->gz.gzfc[gd] != saddr) {
				continue;
			}
			panic("%s: double free detected, freed address: 0x%lx, "
			    "current free cache index: %d, freed index: %d",
			    __func__, saddr, zone->gz.gzfc_index, gd);
		}
		zone_unlock(zone);
	}

	if (gzalloc_consistency_checks) {
		if (gzh->gzsig != GZALLOC_SIGNATURE) {
			panic("GZALLOC signature mismatch for element %p, "
			    "expected 0x%x, found 0x%x",
			    addr, GZALLOC_SIGNATURE, gzh->gzsig);
		}

		if (gzh->gzone != zone && (gzh->gzone != GZDEADZONE)) {
			panic("%s: Mismatched zone or under/overflow, "
			    "current zone: %p, recorded zone: %p, address: %p",
			    __func__, zone, gzh->gzone, (void *)addr);
		}
		/* Partially redundant given the zone check, but may flag header corruption */
		if (gzh->gzsize != zone_elem_size(zone)) {
			panic("Mismatched zfree or under/overflow for zone %p, "
			    "recorded size: 0x%x, element size: 0x%x, address: %p",
			    zone, gzh->gzsize, (uint32_t)zone_elem_size(zone), (void *)addr);
		}

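		/* Scan the slack space surrounding the element; any byte
		 * that does not hold the expected fill pattern indicates
		 * an out-of-bounds write.
		 */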
		char *gzc, *checkstart, *checkend;
		if (gzalloc_uf_mode) {
			checkstart = (char *) ((uintptr_t) gzh + sizeof(*gzh));
			checkend = (char *) ((((vm_offset_t)addr) & ~PAGE_MASK) + PAGE_SIZE);
		} else {
			checkstart = (char *) trunc_page_64(addr);
			checkend = (char *)gzh;
		}

		for (gzc = checkstart; gzc < checkend; gzc++) {
			if (*gzc == gzalloc_fill_pattern) {
				continue;
			}
			panic("%s: detected over/underflow, byte at %p, element %p, "
			    "contents 0x%x from 0x%lx byte sized zone (%s%s) "
			    "doesn't match fill pattern (%c)",
			    __func__, gzc, addr, *gzc, zone_elem_size(zone),
			    zone_heap_name(zone), zone->z_name, gzalloc_fill_pattern);
		}
	}

	if ((startup_phase < STARTUP_SUB_KMEM) || gzh->gzone == GZDEADZONE) {
		/* For now, just leak frees of early allocations
		 * performed before kmem is fully configured.
		 * They don't seem to get freed currently;
		 * consider ml_static_mfree in the future.
		 */
		OSAddAtomic64((SInt32) (rounded_size), &gzalloc_early_free);
		return;
	}

	if (get_preemption_level() != 0) {
		pdzfree_count++;
	}

	if (gzfc_size) {
		/* Either write protect or unmap the newly freed
		 * allocation
		 */
		kr = vm_map_protect(gzalloc_map, saddr,
		    saddr + rounded_size + (1 * PAGE_SIZE),
		    gzalloc_prot, FALSE);
		if (kr != KERN_SUCCESS) {
			panic("%s: vm_map_protect: %p, 0x%x", __func__, (void *)saddr, kr);
		}
	} else {
		free_addr = saddr;
	}

	zone_lock(zone);
	assert(zone->z_self == zone);

	/* Insert newly freed element into the protected free element
	 * cache, and rotate out the LRU element.
	 */
	if (gzfc_size) {
		if (zone->gz.gzfc_index >= gzfc_size) {
			zone->gz.gzfc_index = 0;
		}
		free_addr = zone->gz.gzfc[zone->gz.gzfc_index];
		zone->gz.gzfc[zone->gz.gzfc_index++] = saddr;
	}

	if (free_addr) {
		zone->z_elems_free++;
		zone->z_wired_cur -= 1;
	}

	zpercpu_get(zstats)->zs_mem_freed += rounded_size;
	zone_unlock(zone);

	if (free_addr) {
		// TODO: consider using physical reads to check for
		// corruption while on the protected freelist
		// (i.e. physical corruption)
		kr = vm_map_remove(gzalloc_map, free_addr,
		    free_addr + rounded_size + (1 * PAGE_SIZE),
		    VM_MAP_REMOVE_KUNWIRE);
		if (kr != KERN_SUCCESS) {
			panic("gzfree: vm_map_remove: %p, 0x%x", (void *)free_addr, kr);
		}
		// TODO: sysctl-ize for quick reference
		OSAddAtomic64((SInt32)rounded_size, &gzalloc_freed);
		OSAddAtomic64(-((SInt32) (rounded_size - zone_elem_size(zone))),
		    &gzalloc_wasted);
	}
}

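/*
 * Given an address that may belong to the gzalloc submap, recover the
 * owning zone and element size from the adjoining gzalloc metadata.
 * Returns TRUE (filling in *gzsz and, if z is non-NULL, *z) when the
 * address is gzalloc managed, FALSE otherwise.
 */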
boolean_t
gzalloc_element_size(void *gzaddr, zone_t *z, vm_size_t *gzsz)
{
	uintptr_t a = (uintptr_t)gzaddr;
	if (__improbable(gzalloc_mode && (a >= gzalloc_map_min) && (a < gzalloc_map_max))) {
		gzhdr_t *gzh;
		boolean_t vmef;
		vm_map_entry_t gzvme = NULL;
		vm_map_lock_read(gzalloc_map);
		vmef = vm_map_lookup_entry(gzalloc_map, (vm_map_offset_t)a, &gzvme);
		vm_map_unlock_read(gzalloc_map);
		if (vmef == FALSE) {
			panic("GZALLOC: unable to locate map entry for %p", (void *)a);
		}
		assertf(gzvme->vme_atomic != 0, "GZALLOC: VM map entry inconsistency, "
		    "vme: %p, start: %llu end: %llu", gzvme, gzvme->vme_start, gzvme->vme_end);

		/* Locate the gzalloc metadata adjoining the element */
		if (gzalloc_uf_mode == TRUE) {
			/* In underflow detection mode, locate the map entry describing
			 * the element, and then locate the copy of the gzalloc
			 * header at the trailing edge of the range.
			 */
			gzh = (gzhdr_t *)(gzvme->vme_end - GZHEADER_SIZE);
		} else {
			/* In overflow detection mode, scan forward from
			 * the base of the map entry to locate the
			 * gzalloc header.
			 */
			uint32_t *p = (uint32_t*) gzvme->vme_start;
			while (p < (uint32_t *) gzvme->vme_end) {
				if (*p == GZALLOC_SIGNATURE) {
					break;
				} else {
					p++;
				}
			}
			if (p >= (uint32_t *) gzvme->vme_end) {
				panic("GZALLOC signature missing addr %p, zone %p", gzaddr, z);
			}
			p++;
			uintptr_t q = (uintptr_t) p;
			gzh = (gzhdr_t *) (q - sizeof(gzhdr_t));
		}

		if (gzh->gzsig != GZALLOC_SIGNATURE) {
			panic("GZALLOC signature mismatch for element %p, expected 0x%x, found 0x%x",
			    (void *)a, GZALLOC_SIGNATURE, gzh->gzsig);
		}

		*gzsz = zone_elem_size(gzh->gzone);
		if (__improbable(!gzh->gzone->z_gzalloc_tracked)) {
			panic("GZALLOC: zone mismatch (%p)", gzh->gzone);
		}

		if (z) {
			*z = gzh->gzone;
		}
		return TRUE;
	} else {
		return FALSE;
	}
}