/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	zalloc.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 */

#ifdef KERNEL_PRIVATE

#ifndef _KERN_ZALLOC_H_
#define _KERN_ZALLOC_H_

#include <mach/machine/vm_types.h>
#include <mach_debug/zone_info.h>
#include <kern/kern_types.h>
#include <sys/cdefs.h>

#ifdef XNU_KERNEL_PRIVATE
#include <kern/startup.h>
#endif /* XNU_KERNEL_PRIVATE */

/*
 * Inside XNU proper, uses of the legacy zalloc KPIs are flagged at compile
 * time unless the translation unit opts out with ZALLOC_ALLOW_DEPRECATED.
 */
#if XNU_KERNEL_PRIVATE && !defined(ZALLOC_ALLOW_DEPRECATED)
#define __zalloc_deprecated(msg)        __deprecated_msg(msg)
#else
#define __zalloc_deprecated(msg)
#endif

__BEGIN_DECLS

/*!
 * @macro __zpercpu
 *
 * @abstract
 * Annotation that helps denoting a per-cpu pointer that requires usage of
 * @c zpercpu_*() for access.
 */
#define __zpercpu

/*!
 * @typedef zone_id_t
 *
 * @abstract
 * The type for a zone ID.
 */
typedef uint16_t zone_id_t;

/**
 * @enum zone_create_flags_t
 *
 * @abstract
 * Set of flags to pass to zone_create().
 *
 * @discussion
 * Some kernel-wide policies affect all possible created zones.
 * Explicit @c ZC_* win over such policies.
 */
__options_decl(zone_create_flags_t, uint64_t, {
	/** The default value to pass to zone_create() */
	ZC_NONE                 = 0x00000000,

	/** Force the created zone to use VA sequestering */
	ZC_SEQUESTER            = 0x00000001,
	/** Force the created zone @b NOT to use VA sequestering */
	ZC_NOSEQUESTER          = 0x00000002,

	/** Enable per-CPU zone caching for this zone */
	ZC_CACHING              = 0x00000010,
	/** Disable per-CPU zone caching for this zone */
	ZC_NOCACHING            = 0x00000020,

	/** Allocate zone pages as Read-only **/
	ZC_READONLY             = 0x00800000,

	/** Mark zone as a per-cpu zone */
	ZC_PERCPU               = 0x01000000,

	/** Force the created zone to clear every allocation on free */
	ZC_ZFREE_CLEARMEM       = 0x02000000,

	/** Mark zone as non collectable by zone_gc() */
	ZC_NOGC                 = 0x04000000,

	/** Do not encrypt this zone during hibernation */
	ZC_NOENCRYPT            = 0x08000000,

	/** Type requires alignment to be preserved */
	ZC_ALIGNMENT_REQUIRED   = 0x10000000,

	/** Do not track this zone when gzalloc is engaged */
	ZC_NOGZALLOC            = 0x20000000,

	/** Don't asynchronously replenish the zone via callouts */
	ZC_NOCALLOUT            = 0x40000000,

	/** Can be zdestroy()ed, not default unlike zinit() */
	ZC_DESTRUCTIBLE         = 0x80000000,

#ifdef XNU_KERNEL_PRIVATE
	/** Zone doesn't support TBI tagging */
	ZC_NOTBITAG             = 0x0200000000000000,

	/** This zone will back a kalloc type */
	ZC_KALLOC_TYPE          = 0x0400000000000000,

	/** This zone will back a kalloc heap */
	ZC_KALLOC_HEAP          = 0x0800000000000000,

	/** This zone can be crammed with foreign pages */
	ZC_ALLOW_FOREIGN        = 0x1000000000000000,

	/** This zone belongs to the VM submap */
	ZC_VM                   = 0x2000000000000000,
/* Convenience spelling: ZC_VM on LP64, a no-op flag otherwise. */
#if __LP64__
#define ZC_VM_LP64 ZC_VM
#else
#define ZC_VM_LP64 ZC_NONE
#endif

	/** Disable kasan quarantine for this zone */
	ZC_KASAN_NOQUARANTINE   = 0x4000000000000000,

	/** Disable kasan redzones for this zone */
	ZC_KASAN_NOREDZONE      = 0x8000000000000000,

#endif
});

/*!
 * @union zone_or_view
 *
 * @abstract
 * A type used for calls that admit both a zone or a zone view.
 *
 * @discussion
 * @c zalloc() and @c zfree() and their variants can act on both
 * zones and zone views.
 */
union zone_or_view {
	struct zone_view        *zov_view;
	struct zone             *zov_zone;
	struct kalloc_type_view *zov_kt_heap;
#ifdef __cplusplus
	/* Implicit C++ conversions mirror the C transparent_union below. */
	inline zone_or_view(struct zone_view *zv) : zov_view(zv) {
	}
	inline zone_or_view(struct zone *z) : zov_zone(z) {
	}
	inline zone_or_view(struct kalloc_type_view *kth) : zov_kt_heap(kth) {
	}
#endif
};
#ifdef __cplusplus
typedef union zone_or_view zone_or_view_t;
#else
typedef union zone_or_view zone_or_view_t __attribute__((transparent_union));
#endif

/*!
 * @function zone_create
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * A Zone is a slab allocator that returns objects of a given size very quickly.
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 *
 * @returns             the created zone, this call never fails.
 */
extern zone_t   zone_create(
	const char             *name,
	vm_size_t               size,
	zone_create_flags_t     flags);

/*!
 * @function zdestroy
 *
 * @abstract
 * Destroys a zone previously made with zone_create.
 *
 * @discussion
 * Zones must have been made destructible for @c zdestroy() to be allowed,
 * passing @c ZC_DESTRUCTIBLE at @c zone_create() time.
 *
 * @param zone          the zone to destroy.
 */
extern void     zdestroy(
	zone_t          zone);

/*!
 * @function zone_require
 *
 * @abstract
 * Requires for a given pointer to belong to the specified zone.
 *
 * @discussion
 * The function panics if the check fails as it indicates that the kernel
 * internals have been compromised.
 *
 * Note that zone_require() can only work with:
 * - zones not allowing foreign memory
 * - zones in the general submap.
 *
 * @param zone          the zone the address needs to belong to.
 * @param addr          the element address to check.
 */
extern void     zone_require(
	zone_t          zone,
	void           *addr);

/*!
 * @function zone_require_ro
 *
 * @abstract
 * Version of zone require intended for zones created with ZC_READONLY
 *
 * @discussion
 * This check is not sufficient to fully trust the element.
 *
 * Another check of its content must be performed to prove
 * that the element is "the right one", a typical technique
 * for when the RO data structure is 1:1 with a mutable one,
 * is a simple circularity check with a very strict lifetime
 * (both the mutable and read-only data structures are made
 * and destroyed as close as possible).
 *
 * @param zone_id       the zone id the address needs to belong to.
 * @param elem_size     the element size for this zone.
 * @param addr          the element address to check.
 */
extern void     zone_require_ro(
	zone_id_t       zone_id,
	vm_size_t       elem_size,
	void           *addr);

/*!
 * @function zone_require_ro_range_contains
 *
 * @abstract
 * Version of zone require intended for zones created with ZC_READONLY
 * that only checks that the zone is RO and that the address is in
 * the zone's submap
 *
 * @param zone_id       the zone id the address needs to belong to.
 * @param addr          the element address to check.
 */
extern void     zone_require_ro_range_contains(
	zone_id_t       zone_id,
	void           *addr);

/*!
 * @enum zalloc_flags_t
 *
 * @brief
 * Flags that can be passed to @c zalloc_internal or @c zalloc_flags.
 *
 * @discussion
 * It is encouraged that any callsite passing flags uses exactly one of:
 * @c Z_WAITOK, @c Z_NOWAIT or @c Z_NOPAGEWAIT, the default being @c Z_WAITOK
 * if nothing else was specified.
 *
 * If any @c Z_NO*WAIT flag is passed alongside @c Z_WAITOK,
 * then @c Z_WAITOK is ignored.
 *
 * @const Z_WAITOK
 * Means that it's OK for zalloc() to block to wait for memory,
 * when Z_WAITOK is passed, zalloc will never return NULL.
 *
 * @const Z_NOWAIT
 * Passing this flag means that zalloc is not allowed to ever block.
 *
 * @const Z_NOPAGEWAIT
 * Passing this flag means that zalloc is allowed to wait due to lock
 * contention, but will not wait for the VM to wait for pages when
 * under memory pressure.
 *
 * @const Z_ZERO
 * Passing this flags means that the returned memory has been zeroed out.
 *
 * @const Z_NOFAIL
 * Passing this flag means that the caller expects the allocation to always
 * succeed. This will result in a panic if this assumption isn't correct.
 *
 * This flag is incompatible with @c Z_NOWAIT or @c Z_NOPAGEWAIT. It also can't
 * be used on exhaustible zones.
 *
 #if XNU_KERNEL_PRIVATE
 *
 * @const Z_NOZZC
 * Used internally to mark allocations that will skip zero validation.
 *
 * @const Z_PCPU
 * Used internally for the percpu paths.
 *
 * @const Z_VM_TAG_MASK
 * Represents bits in which a vm_tag_t for the allocation can be passed.
 * (used by kalloc for the zone tagging debugging feature).
 #endif
 */
__options_decl(zalloc_flags_t, uint32_t, {
	// values smaller than 0xff are shared with the M_* flags from BSD MALLOC
	Z_WAITOK        = 0x0000,
	Z_NOWAIT        = 0x0001,
	Z_NOPAGEWAIT    = 0x0002,
	Z_ZERO          = 0x0004,

#if XNU_KERNEL_PRIVATE
	Z_PCPU          = 0x2000,
	Z_NOZZC         = 0x4000,
#endif /* XNU_KERNEL_PRIVATE */
	Z_NOFAIL        = 0x8000,

	/* convenient c++ spellings */
	Z_NOWAIT_ZERO   = Z_NOWAIT | Z_ZERO,
	Z_WAITOK_ZERO   = Z_WAITOK | Z_ZERO,
	Z_WAITOK_ZERO_NOFAIL = Z_WAITOK | Z_ZERO | Z_NOFAIL, /* convenient spelling for c++ */
#if XNU_KERNEL_PRIVATE
	/** used by kalloc to propagate vm tags for -zt */
	Z_VM_TAG_MASK   = 0xffff0000,

#define Z_VM_TAG_SHIFT  16
#define Z_VM_TAG(tag)   ((zalloc_flags_t)(tag) << Z_VM_TAG_SHIFT)
#endif
});

/*!
 * @function zalloc
 *
 * @abstract
 * Allocates an element from a specified zone.
 *
 * @discussion
 * If the zone isn't exhaustible and is expandable, this call never fails.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc(
	zone_or_view_t  zone_or_view);

/*!
 * @function zalloc_noblock
 *
 * @abstract
 * Allocates an element from a specified zone, but never blocks.
 *
 * @discussion
 * This call is suitable for preemptible code, however allocation
 * isn't allowed from interrupt context.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc_noblock(
	zone_or_view_t  zone_or_view);

/*!
 * @function zalloc_flags()
 *
 * @abstract
 * Allocates an element from a specified zone, with flags.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc_flags(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

/*!
 * @function zalloc_ro
 *
 * @abstract
 * Allocates an element from a specified read-only zone.
 *
 * @param zone_id       the zone id to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc_ro(
	zone_id_t       zone_id,
	zalloc_flags_t  flags);

/*!
 * @function zalloc_ro_mut
 *
 * @abstract
 * Modifies an element from a specified read-only zone.
 *
 * @discussion
 * Modifying compiler-assisted authenticated pointers using this function will
 * not result in a signed pointer being written. The caller is expected to
 * sign the value appropriately beforehand if they wish to do this.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param offset        offset from element
 * @param new_data      pointer to new data
 * @param new_data_size size of modification
 *
 */
extern void     zalloc_ro_mut(
	zone_id_t       zone_id,
	void           *elem,
	vm_offset_t     offset,
	const void     *new_data,
	vm_size_t       new_data_size);

/*!
 * @function zalloc_ro_update_elem
 *
 * @abstract
 * Update the value of an entire element allocated in the read only allocator.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param new_data      pointer to new data
 *
 */
#define zalloc_ro_update_elem(zone_id, elem, new_data)  ({ \
	const typeof(*(elem)) *__new_data = (new_data); \
	zalloc_ro_mut(zone_id, elem, 0, __new_data, sizeof(*__new_data)); \
})

/*!
 * @function zalloc_ro_update_field
 *
 * @abstract
 * Update a single field of an element allocated in the read only allocator.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param field         the element field to be modified
 * @param value         pointer to the new value for the field
 *
 */
#define zalloc_ro_update_field(zone_id, elem, field, value)  ({ \
	const typeof((elem)->field) *__value = (value); \
	zalloc_ro_mut(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    __value, sizeof((elem)->field)); \
})

/*!
 * @function zalloc_ro_clear
 *
 * @abstract
 * Zeroes an element from a specified read-only zone.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param offset        offset from element
 * @param size          size of modification
 */
extern void     zalloc_ro_clear(
	zone_id_t       zone_id,
	void           *elem,
	vm_offset_t     offset,
	vm_size_t       size);

/*!
 * @function zalloc_ro_clear_field
 *
 * @abstract
 * Zeroes the specified field of an element from a specified read-only zone.
 *
 * @param zone_id       the zone id to allocate from
 * @param elem          element to be modified
 * @param field         the element field to be zeroed
 */
#define zalloc_ro_clear_field(zone_id, elem, field) \
	zalloc_ro_clear(zone_id, elem, offsetof(typeof(*(elem)), field), \
	    sizeof((elem)->field))

/*!
 * @function zfree_ro()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_ro().
 *
 * @param zone_id       the zone id to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_ro(
	zone_id_t       zone_id,
	void           *addr);

/*!
 * @function zfree
 *
 * @abstract
 * Frees an element allocated with @c zalloc*.
 *
 * @discussion
 * If the element being freed doesn't belong to the specified zone,
 * then this call will panic.
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param elem          the element to free
 */
extern void     zfree(
	zone_or_view_t  zone_or_view,
	void           *elem);

/*
 * This macro sets "elem" to NULL on free.
 *
 * Note: all values passed to zfree*() might be in the element to be freed,
 *       temporaries must be taken, and the resetting to be done prior to free.
 */
#define zfree(zone, elem) ({ \
	__auto_type __zfree_zone = (zone); \
	(zfree)(__zfree_zone, (void *)__zalloc_ptr_load_and_erase(elem)); \
})

#define zfree_ro(zid, elem) ({ \
	__auto_type __zfree_zid = (zid); \
	(zfree_ro)(__zfree_zid, (void *)__zalloc_ptr_load_and_erase(elem)); \
})

/* deprecated KPIS */

__zalloc_deprecated("use zone_create()")
extern zone_t   zinit(
	vm_size_t       size,           /* the size of an element */
	vm_size_t       maxmem,         /* maximum memory to use */
	vm_size_t       alloc,          /* allocation size */
	const char      *name);         /* a name for the zone */


#pragma mark: zone views
/*!
 * @typedef zone_stats_t
 *
 * @abstract
 * The opaque type for per-cpu zone stats that are accumulated per zone
 * or per zone-view.
 */
typedef struct zone_stats *__zpercpu zone_stats_t;

/*!
 * @typedef zone_view_t
 *
 * @abstract
 * A view on a zone for accounting purposes.
 *
 * @discussion
 * A zone view uses the zone it references for the allocations backing store,
 * but does the allocation accounting at the view level.
 *
 * These accounting are surfaced by @b zprint(1) and similar tools,
 * which allow for cheap but finer grained understanding of allocations
 * without any fragmentation cost.
 *
 * Zone views are protected by the kernel lockdown and can't be initialized
 * dynamically. They must be created using @c ZONE_VIEW_DEFINE().
 */
typedef struct zone_view *zone_view_t;
struct zone_view {
	zone_t          zv_zone;
	zone_stats_t    zv_stats;
	const char     *zv_name;
	zone_view_t     zv_next;
};

#ifdef XNU_KERNEL_PRIVATE
/*!
 * @enum zone_kheap_id_t
 *
 * @brief
 * Enumerate a particular kalloc heap.
 *
 * @discussion
 * More documentation about heaps is available in @c <kern/kalloc.h>.
 *
 * @const KHEAP_ID_NONE
 * This value denotes regular zones, not used by kalloc.
 *
 * @const KHEAP_ID_DEFAULT
 * Indicates zones part of the KHEAP_DEFAULT heap.
 *
 * @const KHEAP_ID_DATA_BUFFERS
 * Indicates zones part of the KHEAP_DATA_BUFFERS heap.
 *
 * @const KHEAP_ID_KEXT
 * Indicates zones part of the KHEAP_KEXT heap.
 */
__enum_decl(zone_kheap_id_t, uint32_t, {
	KHEAP_ID_NONE,
	KHEAP_ID_DEFAULT,
	KHEAP_ID_DATA_BUFFERS,
	KHEAP_ID_KEXT,

#define KHEAP_ID_COUNT (KHEAP_ID_KEXT + 1)
});

/*!
 * @macro ZONE_VIEW_DECLARE
 *
 * @abstract
 * (optionally) declares a zone view (in a header).
 *
 * @param var           the name for the zone view.
 */
#define ZONE_VIEW_DECLARE(var) \
	extern struct zone_view var[1]

/*!
 * @macro ZONE_VIEW_DEFINE
 *
 * @abstract
 * Defines a given zone view and what it points to.
 *
 * @discussion
 * Zone views can either share a pre-existing zone,
 * or perform a lookup into a kalloc heap for the zone
 * backing the bucket of the proper size.
 *
 * Zone views are initialized during the @c STARTUP_SUB_ZALLOC phase,
 * as the last rank. If views on zones are created, these must have been
 * created before this stage.
 *
 * This macro should not be used to create zone views from default
 * kalloc heap, KALLOC_TYPE_DEFINE should be used instead.
 *
 * @param var           the name for the zone view.
 * @param name          a string describing the zone view.
 * @param heap_or_zone  a @c KHEAP_ID_* constant or a pointer to a zone.
 * @param size          the element size to be allocated from this view.
 */
#define ZONE_VIEW_DEFINE(var, name, heap_or_zone, size) \
	SECURITY_READ_ONLY_LATE(struct zone_view) var[1] = { { \
	    .zv_name = name, \
	} }; \
	static __startup_data struct zone_view_startup_spec \
	__startup_zone_view_spec_ ## var = { var, { heap_or_zone }, size }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_LAST, zone_view_startup_init, \
	    &__startup_zone_view_spec_ ## var)

#endif /* XNU_KERNEL_PRIVATE */


#ifdef XNU_KERNEL_PRIVATE
#pragma mark - XNU only interfaces

#include <kern/cpu_number.h>

#pragma GCC visibility push(hidden)

#pragma mark XNU only: zalloc (extended)

/* Alignment masks (alignment - 1) for common widths. */
#define ZALIGN_NONE             (sizeof(uint8_t)  - 1)
#define ZALIGN_16               (sizeof(uint16_t) - 1)
#define ZALIGN_32               (sizeof(uint32_t) - 1)
#define ZALIGN_PTR              (sizeof(void *)   - 1)
#define ZALIGN_64               (sizeof(uint64_t) - 1)
#define ZALIGN(t)               (_Alignof(t)      - 1)


/*!
 * @function zalloc_permanent()
 *
 * @abstract
 * Allocates a permanent element from the permanent zone
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 * Note that the size of this allocation can not be determined
 * by zone_element_size so it should not be used for copyio.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void    *zalloc_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_permanent_type()
 *
 * @abstract
 * Allocates a permanent element of a given type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_permanent_type(type_t) \
	((type_t *)zalloc_permanent(sizeof(type_t), ZALIGN(type_t)))

/*!
 * @function zalloc_first_proc_made()
 *
 * @abstract
 * Declare that the "early" allocation phase is done.
 */
extern void
zalloc_first_proc_made(void);

#pragma mark XNU only: per-cpu allocations

/*!
 * @macro zpercpu_get_cpu()
 *
 * @abstract
 * Get a pointer to a specific CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 * @param cpu           the specified CPU number as returned by @c cpu_number()
 *
 * @returns             the per-CPU slot for @c ptr for the specified CPU.
 */
#define zpercpu_get_cpu(ptr, cpu) \
	__zpcpu_cast(ptr, __zpcpu_demangle(ptr) + ptoa((unsigned)cpu))

/*!
 * @macro zpercpu_get()
 *
 * @abstract
 * Get a pointer to the current CPU slot of a given per-cpu variable.
 *
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 *
 * @returns             the per-CPU slot for @c ptr for the current CPU.
 */
#define zpercpu_get(ptr) \
	zpercpu_get_cpu(ptr, cpu_number())

/*!
 * @macro zpercpu_foreach()
 *
 * @abstract
 * Enumerate all per-CPU slots by address.
 *
 * @param it            the name for the iterator
 * @param ptr           the per-cpu pointer (returned by @c zalloc_percpu*()).
 */
#define zpercpu_foreach(it, ptr) \
	for (typeof(ptr) it = zpercpu_get_cpu(ptr, 0), \
	    __end_##it = zpercpu_get_cpu(ptr, zpercpu_count()); \
	    it < __end_##it; it = __zpcpu_next(it))

/*!
 * @macro zpercpu_foreach_cpu()
 *
 * @abstract
 * Enumerate all per-CPU slots by CPU slot number.
 *
 * @param cpu           the name for cpu number iterator.
 */
#define zpercpu_foreach_cpu(cpu) \
	for (unsigned cpu = 0; cpu < zpercpu_count(); cpu++)

/*!
 * @function zalloc_percpu()
 *
 * @abstract
 * Allocates an element from a per-cpu zone.
 *
 * @discussion
 * The returned pointer cannot be used directly and must be manipulated
 * through the @c zpercpu_get*() interfaces.
 *
 * @param zone_or_view  the zone or zone view to allocate from
 * @param flags         a collection of @c zalloc_flags_t.
 *
 * @returns             NULL or the allocated element
 */
extern void    *zalloc_percpu(
	zone_or_view_t  zone_or_view,
	zalloc_flags_t  flags);

/*!
 * @function zfree_percpu()
 *
 * @abstract
 * Frees an element previously allocated with @c zalloc_percpu().
 *
 * @param zone_or_view  the zone or zone view to free the element to.
 * @param addr          the address to free
 */
extern void     zfree_percpu(
	zone_or_view_t  zone_or_view,
	void           *addr);

/*!
 * @function zalloc_percpu_permanent()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param size          the element size (must be smaller than PAGE_SIZE)
 * @param align_mask    the required alignment for this allocation
 *
 * @returns             the allocated element
 */
extern void    *zalloc_percpu_permanent(
	vm_size_t       size,
	vm_offset_t     align_mask);

/*!
 * @function zalloc_percpu_permanent_type()
 *
 * @abstract
 * Allocates a permanent percpu-element from the permanent percpu zone of a given
 * type with its natural alignment.
 *
 * @discussion
 * Memory returned by this function is always 0-initialized.
 *
 * @param type_t        the element type
 *
 * @returns             the allocated element
 */
#define zalloc_percpu_permanent_type(type_t) \
	((type_t *)zalloc_percpu_permanent(sizeof(type_t), ZALIGN(type_t)))


#pragma mark XNU only: zone creation (extended)

/*!
 * @enum zone_reserved_id_t
 *
 * @abstract
 * Well known pre-registered zones, allowing use of zone_id_require()
 *
 * @discussion
 * @c ZONE_ID__* aren't real zone IDs.
 *
 * @c ZONE_ID__ZERO reserves zone index 0 so that it can't be used, as 0 is too
 * easy a value to produce (by malice or accident).
 *
 * @c ZONE_ID__FIRST_DYNAMIC is the first dynamic zone ID that can be used by
 * @c zone_create().
 */
__enum_decl(zone_reserved_id_t, zone_id_t, {
	ZONE_ID__ZERO,

	ZONE_ID_PERMANENT,
	ZONE_ID_PERCPU_PERMANENT,

	ZONE_ID_THREAD_RO,
	ZONE_ID_MAC_LABEL,
	ZONE_ID_PROC_RO,
	ZONE_ID_PROC_SIGACTS_RO,
	ZONE_ID_KAUTH_CRED,
	ZONE_ID_CS_BLOB,

	ZONE_ID__FIRST_RO = ZONE_ID_THREAD_RO,
	ZONE_ID__LAST_RO  = ZONE_ID_CS_BLOB,

	ZONE_ID_PMAP,
	ZONE_ID_VM_MAP,
	ZONE_ID_VM_MAP_COPY,
	ZONE_ID_VM_MAP_ENTRY,
	ZONE_ID_VM_MAP_HOLES,
	ZONE_ID_VM_PAGES,
	ZONE_ID_IPC_PORT,
	ZONE_ID_IPC_PORT_SET,
	ZONE_ID_IPC_VOUCHERS,
	ZONE_ID_TASK,
	ZONE_ID_PROC,
	ZONE_ID_THREAD,
	ZONE_ID_TURNSTILE,
	ZONE_ID_SEMAPHORE,
	ZONE_ID_FILEPROC,

	ZONE_ID__FIRST_DYNAMIC,
});

/*!
 * @const ZONE_ID_ANY
 * The value to pass to @c zone_create_ext() to allocate a non pre-registered
 * Zone ID.
 */
#define ZONE_ID_ANY ((zone_id_t)-1)

/*!
 * @const ZONE_ID_INVALID
 * An invalid zone_id_t that corresponds to nothing.
 */
#define ZONE_ID_INVALID ((zone_id_t)-2)

/*!
 * @function zone_name
 *
 * @param zone          the specified zone
 * @returns             the name of the specified zone.
 */
const char *zone_name(
	zone_t zone);

/*!
 * @function zone_heap_name
 *
 * @param zone          the specified zone
 * @returns             the name of the heap this zone is part of, or "".
 */
const char *zone_heap_name(
	zone_t zone);

/*!
 * @function zone_create_ext
 *
 * @abstract
 * Creates a zone with the specified parameters.
 *
 * @discussion
 * This is an extended version of @c zone_create().
 *
 * @param name          the name for the new zone.
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 * @param desired_zid   a @c zone_reserved_id_t value or @c ZONE_ID_ANY.
 *
 * @param extra_setup   a block that can perform non trivial initialization
 *                      on the zone before it is marked valid.
 *                      This block can call advanced setups like:
 *                      - zone_set_exhaustible()
 *                      - zone_set_noexpand()
 *
 * @returns             the created zone, this call never fails.
 */
extern zone_t   zone_create_ext(
	const char             *name,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_id_t               desired_zid,
	void                  (^extra_setup)(zone_t));

/*!
 * @macro ZONE_DECLARE
 *
 * @abstract
 * Declares a zone variable to automatically initialize with the specified
 * parameters.
 *
 * @param var           the name of the variable to declare.
 * @param name          the name for the zone
 * @param size          the size of the elements returned by this zone.
 * @param flags         a set of @c zone_create_flags_t flags.
 */
#define ZONE_DECLARE(var, name, size, flags) \
	SECURITY_READ_ONLY_LATE(zone_t) var; \
	static_assert(((flags) & ZC_DESTRUCTIBLE) == 0); \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## var = { &var, name, size, flags, \
	    ZONE_ID_ANY, NULL }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## var)

/*!
1024 * @macro ZONE_INIT 1025 * 1026 * @abstract 1027 * Initializes a given zone automatically during startup with the specified 1028 * parameters. 1029 * 1030 * @param var the name of the variable to initialize. 1031 * @param name the name for the zone 1032 * @param size the size of the elements returned by this zone. 1033 * @param flags a set of @c zone_create_flags_t flags. 1034 * @param desired_zid a @c zone_reserved_id_t value or @c ZONE_ID_ANY. 1035 * @param extra_setup a block that can perform non trivial initialization 1036 * (@see @c zone_create_ext()). 1037 */ 1038 #define ZONE_INIT(var, name, size, flags, desired_zid, extra_setup) \ 1039 __ZONE_INIT(__LINE__, var, name, size, flags, desired_zid, extra_setup) 1040 1041 /*! 1042 * @function zone_id_require 1043 * 1044 * @abstract 1045 * Requires for a given pointer to belong to the specified zone, by ID and size. 1046 * 1047 * @discussion 1048 * The function panics if the check fails as it indicates that the kernel 1049 * internals have been compromised. 1050 * 1051 * This is a variant of @c zone_require() which: 1052 * - isn't sensitive to @c zone_t::elem_size being compromised, 1053 * - is slightly faster as it saves one load and a multiplication. 1054 * 1055 * @warning: zones using foreign memory can't use this interface. 1056 * 1057 * @param zone_id the zone ID the address needs to belong to. 1058 * @param elem_size the size of elements for this zone. 1059 * @param addr the element address to check. 1060 */ 1061 extern void zone_id_require( 1062 zone_id_t zone_id, 1063 vm_size_t elem_size, 1064 void *addr); 1065 1066 /*! 1067 * @function zone_id_require_allow_foreign 1068 * 1069 * @abstract 1070 * Requires for a given pointer to belong to the specified zone, by ID and size. 1071 * 1072 * @discussion 1073 * This is a version of @c zone_id_require() that works with zones allowing 1074 * foreign memory. 
 */
extern void zone_id_require_allow_foreign(
	zone_id_t               zone_id,
	vm_size_t               elem_size,
	void                   *addr);

/* Make zone as non expandable, to be called from the zone_create_ext() setup hook */
extern void zone_set_noexpand(
	zone_t          zone,
	vm_size_t       max_elements);

/* Make zone exhaustible, to be called from the zone_create_ext() setup hook */
extern void zone_set_exhaustible(
	zone_t          zone,
	vm_size_t       max_elements);

/*!
 * @function zone_fill_initially
 *
 * @brief
 * Initially fill a non collectable zone to have the specified amount of
 * elements.
 *
 * @discussion
 * This function must be called on a non collectable permanent zone before it
 * has been used yet.
 *
 * @param zone          The zone to fill.
 * @param nelems        The number of elements to be able to hold.
 */
extern void zone_fill_initially(
	zone_t          zone,
	vm_size_t       nelems);

#pragma mark XNU only: misc & implementation details

/*
 * Descriptor consumed by zone_create_startup() at STARTUP_SUB_ZALLOC time;
 * filled in statically by ZONE_DECLARE() / ZONE_INIT().  The created zone is
 * stored through @c z_var, and @c z_setup (possibly NULL) is the extra_setup
 * hook forwarded to zone_create_ext().
 */
struct zone_create_startup_spec {
	zone_t                 *z_var;
	const char             *z_name;
	vm_size_t               z_size;
	zone_create_flags_t     z_flags;
	zone_id_t               z_zid;
	void                  (^z_setup)(zone_t);
};

extern void zone_create_startup(
	struct zone_create_startup_spec *spec);

#define __ZONE_INIT1(ns, var, name, size, flags, zid, setup) \
	static __startup_data struct zone_create_startup_spec \
	__startup_zone_spec_ ## ns = { var, name, size, flags, zid, setup }; \
	STARTUP_ARG(ZALLOC, STARTUP_RANK_MIDDLE, zone_create_startup, \
	    &__startup_zone_spec_ ## ns)

/*
 * Extra expansion level so that `ns` (typically __LINE__ from ZONE_INIT)
 * is macro-expanded before it is token-pasted by __ZONE_INIT1.
 */
#define __ZONE_INIT(ns, var, name, size, flags, zid, setup) \
	__ZONE_INIT1(ns, var, name, size, flags, zid, setup) \

/*
 * Descriptor consumed by zone_view_startup_init(); the union holds either a
 * kalloc heap ID or a pointer to a specific backing zone variable.
 * NOTE(review): which union member is valid appears to be determined by the
 * view's configuration — confirm against zone_view_startup_init() in zalloc.c.
 */
struct zone_view_startup_spec {
	zone_view_t             zv_view;
	union {
		zone_kheap_id_t zv_heapid;
		zone_t         *zv_zone;
	};
	vm_size_t               zv_size;
};

extern void zone_view_startup_init(
	struct zone_view_startup_spec *spec);

extern void zone_userspace_reboot_checks(void);

/*
 * On DEBUG/DEVELOPMENT, per-cpu pointers are "mangled" by flipping the top
 * address bit so that direct (unadjusted) dereferences trap; on RELEASE the
 * bit is 0 and mangling is an identity operation.
 */
#if DEBUG || DEVELOPMENT
# if __LP64__
# define ZPCPU_MANGLE_BIT    (1ul << 63)
# else /* !__LP64__ */
# define ZPCPU_MANGLE_BIT    (1ul << 31)
# endif /* !__LP64__ */
#else /* !(DEBUG || DEVELOPMENT) */
# define ZPCPU_MANGLE_BIT    0ul
#endif /* !(DEBUG || DEVELOPMENT) */

/* Mangle clears the (normally set) top bit; demangle restores it. */
#define __zpcpu_mangle(ptr)     (__zpcpu_addr(ptr) & ~ZPCPU_MANGLE_BIT)
#define __zpcpu_demangle(ptr)   (__zpcpu_addr(ptr) | ZPCPU_MANGLE_BIT)
#define __zpcpu_addr(e)         ((vm_address_t)(e))
#define __zpcpu_cast(ptr, e)    ((typeof(ptr))(e))
/* Per-cpu slots are one page apart (cf. ptoa(cpu_number()) in __zpcpu_mangle_for_boot). */
#define __zpcpu_next(ptr)       __zpcpu_cast(ptr, __zpcpu_addr(ptr) + PAGE_SIZE)

/**
 * @macro __zpcpu_mangle_for_boot()
 *
 * @discussion
 * Per-cpu variables allocated in zones (as opposed to percpu globals) that need
 * to function early during boot (before @c STARTUP_SUB_ZALLOC) might use static
 * storage marked @c __startup_data and replace it with the proper allocation
 * at the end of the @c STARTUP_SUB_ZALLOC phase (@c STARTUP_RANK_LAST).
 *
 * However, some devices boot from a cpu where @c cpu_number() != 0. This macro
 * provides the proper mangling of the storage into a "fake" percpu pointer so
 * that accesses through @c zpercpu_get() functions properly.
 *
 * This is invalid to use after the @c STARTUP_SUB_ZALLOC phase has completed.
 */
#define __zpcpu_mangle_for_boot(ptr)  ({ \
	assert(startup_phase < STARTUP_SUB_ZALLOC); \
	/* subtract this cpu's page offset so zpercpu_get()'s adjustment lands back on `ptr` */ \
	__zpcpu_cast(ptr, __zpcpu_mangle(__zpcpu_addr(ptr) - ptoa(cpu_number()))); \
})

extern unsigned zpercpu_count(void) __pure2;


/* These functions used for leak detection both in zalloc.c and mbuf.c */
extern uintptr_t hash_mix(uintptr_t);
extern uint32_t hashbacktrace(uintptr_t *, uint32_t, uint32_t);
extern uint32_t hashaddr(uintptr_t, uint32_t);

#if CONFIG_ZLEAKS
/* support for the kern.zleak.* sysctls */

extern kern_return_t zleak_activate(void);
extern vm_size_t zleak_max_zonemap_size;
extern vm_size_t zleak_global_tracking_threshold;
extern vm_size_t zleak_per_zone_tracking_threshold;

extern int get_zleak_state(void);

#endif /* CONFIG_ZLEAKS */

extern zone_t percpu_u64_zone;

#pragma GCC visibility pop
#endif /* XNU_KERNEL_PRIVATE */

/*
 * Atomically-looking load-and-clear of a pointer-sized lvalue: evaluates to
 * the stored pointer and replaces it with NULL.  The lvalue is evaluated only
 * once (its address is taken first), and the _Static_assert rejects operands
 * that are not pointer sized.
 * NOTE(review): this is a plain load/store pair, not an atomic exchange — it
 * assumes the caller already owns/serializes access to `elem`.
 */
#define __zalloc_ptr_load_and_erase(elem)  ({ \
	_Static_assert(sizeof(elem) == sizeof(void *), \
	    "elem isn't pointer sized"); \
	__auto_type __eptr = &(elem); \
	__auto_type __elem = *__eptr; \
	*__eptr = (__typeof__(__elem))NULL; \
	__elem; \
})

__END_DECLS

#endif /* _KERN_ZALLOC_H_ */

#endif /* KERNEL_PRIVATE */