/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * FILE_ID: vm_param.h
 */

/*
 * ARM machine dependent virtual memory parameters.
 */

#ifndef _MACH_ARM_VM_PARAM_H_
#define _MACH_ARM_VM_PARAM_H_

#if defined (__arm__) || defined (__arm64__)

#if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
#include <arm64/proc_reg.h>
#endif

/* proc_reg.h also supplies ARM_PGSHIFT, used below when 16K pages are configured. */
#if defined(KERNEL_PRIVATE) && __ARM_16K_PG__
#include <arm64/proc_reg.h>
#endif

/* User-space (non-kernel) code gets the page geometry from vm_page_size. */
#if !defined (KERNEL) && !defined (__ASSEMBLER__)
#include <mach/vm_page_size.h>
#endif

#define BYTE_SIZE               8       /* byte size in bits */

#if defined (KERNEL)

#ifndef __ASSEMBLER__

/*
 * On arm the page shift is a compile-time constant (4K pages).
 * On arm64 it is a variable (extern int), resolved at runtime:
 * the page size may be 4K or 16K depending on configuration.
 */
#ifdef __arm__
#define PAGE_SHIFT_CONST        12
#elif defined(__arm64__)
extern int PAGE_SHIFT_CONST;
#else
#error Unsupported arch
#endif

#if defined(KERNEL_PRIVATE) && __ARM_16K_PG__
#define PAGE_SHIFT              ARM_PGSHIFT
#else
#define PAGE_SHIFT              PAGE_SHIFT_CONST
#endif
#define PAGE_SIZE               (1 << PAGE_SHIFT)
#define PAGE_MASK               (PAGE_SIZE-1)

#define VM_PAGE_SIZE            PAGE_SIZE

/* Convert a machine-page count to a byte count. */
#define machine_ptob(x)         ((x) << PAGE_SHIFT)

/*
 * Defined for the purpose of testing the pmap advertised page
 * size; this does not necessarily match the hardware page size.
 */
#define TEST_PAGE_SIZE_16K      ((PAGE_SHIFT_CONST == 14))
#define TEST_PAGE_SIZE_4K       ((PAGE_SHIFT_CONST == 12))

#endif /* !__ASSEMBLER__ */

#else /* !KERNEL */

/* Outside the kernel, defer to the runtime-exported page geometry. */
#define PAGE_SHIFT              vm_page_shift
#define PAGE_SIZE               vm_page_size
#define PAGE_MASK               vm_page_mask

#define VM_PAGE_SIZE            vm_page_size

/* Convert a machine-page count to a byte count. */
#define machine_ptob(x)         ((x) << PAGE_SHIFT)

#endif /* KERNEL */

/* Bounds on the page size across all supported configurations: 16K max, 4K min. */
#define PAGE_MAX_SHIFT          14
#define PAGE_MAX_SIZE           (1 << PAGE_MAX_SHIFT)
#define PAGE_MAX_MASK           (PAGE_MAX_SIZE-1)

#define PAGE_MIN_SHIFT          12
#define PAGE_MIN_SIZE           (1 << PAGE_MIN_SHIFT)
#define PAGE_MIN_MASK           (PAGE_MIN_SIZE-1)

#define VM_MAX_PAGE_ADDRESS     MACH_VM_MAX_ADDRESS

#ifndef __ASSEMBLER__

#ifdef MACH_KERNEL_PRIVATE

/* 32-bit address-space support: the low 32-bit slice of VM_MAX_ADDRESS. */
#define VM32_SUPPORT            1
#define VM32_MIN_ADDRESS        ((vm32_offset_t) 0)
#define VM32_MAX_ADDRESS        ((vm32_offset_t) (VM_MAX_ADDRESS & 0xFFFFFFFF))

#endif /* MACH_KERNEL_PRIVATE */

#if defined (__arm__)

#define VM_MIN_ADDRESS          ((vm_address_t) 0x00000000)
#define VM_MAX_ADDRESS          ((vm_address_t) 0x80000000)

/* system-wide values */
#define MACH_VM_MIN_ADDRESS     ((mach_vm_offset_t) 0)
#define MACH_VM_MAX_ADDRESS     ((mach_vm_offset_t) VM_MAX_ADDRESS)

#elif defined (__arm64__)

#define VM_MIN_ADDRESS          ((vm_address_t) 0x0000000000000000ULL)
#define VM_MAX_ADDRESS          ((vm_address_t) 0x00000000F0000000ULL)

/* system-wide values */
#define MACH_VM_MIN_ADDRESS_RAW 0x0ULL
#if defined(XNU_PLATFORM_MacOSX) || defined(XNU_PLATFORM_DriverKit)
#define MACH_VM_MAX_ADDRESS_RAW 0x00007FFFFE000000ULL
#else
#define MACH_VM_MAX_ADDRESS_RAW 0x0000000FC0000000ULL
#endif

/*
 * `MACH_VM_MAX_ADDRESS` is exported to user space, but we don't want this
 * larger value for `MACH_VM_MAX_ADDRESS` to be exposed outside the kernel.
 */
#if XNU_KERNEL_PRIVATE
#if defined(XNU_PLATFORM_iPhoneOS) && EXTENDED_USER_VA_SUPPORT
#undef MACH_VM_MAX_ADDRESS_RAW
#define MACH_VM_MAX_ADDRESS_RAW 0x00007FFFFE000000ULL
#endif /* defined(XNU_PLATFORM_iPhoneOS) && EXTENDED_USER_VA_SUPPORT */
/* threshold for allocations to be placed in the large file range */
#define VM_LARGE_FILE_THRESHOLD (1ULL << 30)
#define MACH_VM_JUMBO_ADDRESS   ((mach_vm_offset_t) 0x0000000FC0000000ULL)
#endif /* XNU_KERNEL_PRIVATE */

#define MACH_VM_MIN_ADDRESS     ((mach_vm_offset_t) MACH_VM_MIN_ADDRESS_RAW)
#define MACH_VM_MAX_ADDRESS     ((mach_vm_offset_t) MACH_VM_MAX_ADDRESS_RAW)

/* VA range reserved as the GPU carveout. */
#define MACH_VM_MIN_GPU_CARVEOUT_ADDRESS_RAW 0x0000001000000000ULL
#define MACH_VM_MAX_GPU_CARVEOUT_ADDRESS_RAW 0x0000007000000000ULL
#define MACH_VM_MIN_GPU_CARVEOUT_ADDRESS ((mach_vm_offset_t) MACH_VM_MIN_GPU_CARVEOUT_ADDRESS_RAW)
#define MACH_VM_MAX_GPU_CARVEOUT_ADDRESS ((mach_vm_offset_t) MACH_VM_MAX_GPU_CARVEOUT_ADDRESS_RAW)

#else /* defined(__arm64__) */
#error architecture not supported
#endif

#define VM_MAP_MIN_ADDRESS      VM_MIN_ADDRESS
#define VM_MAP_MAX_ADDRESS      VM_MAX_ADDRESS

#ifdef KERNEL

#if defined (__arm__)
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS  31
#define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) 0x80000000)
#define VM_MAX_KERNEL_ADDRESS   ((vm_address_t) 0xFFFEFFFF)
#define VM_HIGH_KERNEL_WINDOW   ((vm_address_t) 0xFFFE0000)

#elif defined (__arm64__)
/*
 * kalloc() parameters:
 *
 * Historically kalloc's underlying zones were power-of-2 sizes, with a
 * KALLOC_MINSIZE of 16 bytes. Thus the allocator ensured that
 * (sizeof == alignof) >= 16 for all kalloc allocations.
 *
 * Today kalloc may use zones with intermediate (small) sizes, constrained by
 * KALLOC_MINSIZE and a minimum alignment, expressed by KALLOC_LOG2_MINALIGN.
 *
 * Note that most dynamically allocated data structures contain more than
 * one int/long/pointer member, so KALLOC_MINSIZE should probably start at 8.
 */
#define TiB(x) ((0ULL + (x)) << 40)
#define GiB(x) ((0ULL + (x)) << 30)
#define KALLOC_MINSIZE          16      /* minimum allocation size */
#define KALLOC_LOG2_MINALIGN    4       /* log2 minimum alignment */

/*
 * The minimum and maximum kernel address; some configurations may
 * constrain the address space further.
 */


#ifndef __BUILDING_XNU_LIBRARY__
#if XNU_KERNEL_PRIVATE
#if defined(ARM_LARGE_MEMORY)
/*
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_fed0_0000_0000 |-1216GB | 832GB  | KASAN_SHADOW_MIN       |
 * | 0xffff_fecf_ffff_ffff |        |        | VM_MAX_KERNEL_ADDRESS  |
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_fe10_0000_0000 |-1984GB | 64GB   | PMAP_HEAP_RANGE_START  |
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_fe00_0700_4000 |        |        | VM_KERNEL_LINK_ADDRESS |
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_fe00_0000_0000 |-2048GB | 0GB    | VM_MIN_KERNEL_ADDRESS  |
 * |                       |        |        | LOW_GLOBALS            |
 * +-----------------------+--------+--------+------------------------+
 */
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS  41

// Kernel VA space starts at -2TB
#define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) (0ULL - TiB(2)))

// 64 GB for kernel cache and globals
// 768 GB for heap/general kernel use
// 1216 GB left over at the top of the range for KASAN
// Assuming KASAN TBI, this lets us cover down to:
// 0 - (1216GB<<KASAN_SCALE) = 0xffff_ed00_0000_0000, or ~19.5TB of VA
// Since we place the DRAM PAPT below VM_MIN_KERNEL_ADDRESS on large
// memory configurations, this configuration works until systems have
// ~17.5TB of DRAM.
#define VM_MAX_KERNEL_ADDRESS \
	((vm_address_t) (VM_MIN_KERNEL_ADDRESS + GiB(64) + GiB(768) - 1))

#else // ARM_LARGE_MEMORY
/*
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_fffc_0000_0000 | -16GB  | 112GB  | KASAN_SHADOW_MIN       |
 * |                       |        |        | VM_MAX_KERNEL_ADDRESS  |
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_fff0_0700_4000 |        |        | VM_KERNEL_LINK_ADDRESS |
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_fff0_0000_0000 | -64GB  | 64GB   | LOW_GLOBALS            |
 * |                       |        |        | PMAP_HEAP_RANGE_START  | <= H8
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_ffe0_0000_0000 | -128GB | 0GB    | VM_MIN_KERNEL_ADDRESS  | <= H8
 * +-----------------------+--------+--------+------------------------+
 * | 0xffff_ffdc_0000_0000 | -144GB | 0GB    | VM_MIN_KERNEL_ADDRESS  | >= H9
 * |                       |        |        | PMAP_HEAP_RANGE_START  | >= H9
 * +-----------------------+--------+--------+------------------------+
 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS  38
#define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) (0ULL - GiB(144)))
#else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS  37
#define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) 0xffffffe000000000ULL)
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
#define VM_MAX_KERNEL_ADDRESS   ((vm_address_t) 0xfffffffbffffffffULL)

#endif // ARM_LARGE_MEMORY

#else // !XNU_KERNEL_PRIVATE
// Inform kexts about largest possible kernel address space
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS  41
#define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) (0ULL - TiB(2)))
#define VM_MAX_KERNEL_ADDRESS   ((vm_address_t) 0xfffffffbffffffffULL)
#endif // XNU_KERNEL_PRIVATE
#else /* __BUILDING_XNU_LIBRARY__ */
/* Library/host builds use a small fixed kernel VA window. */
#define VM_MIN_KERNEL_ADDRESS ((vm_address_t)(0x100000000ULL))
#define VM_MAX_KERNEL_ADDRESS ((vm_address_t)(0ULL + GiB(2)))
#define VM_KERNEL_POINTER_SIGNIFICANT_BITS 31
#endif /*__BUILDING_XNU_LIBRARY__ */
#else
#error architecture not supported
#endif

#define VM_MIN_KERNEL_AND_KEXT_ADDRESS  VM_MIN_KERNEL_ADDRESS

#if defined (__arm64__)
/* Top-Byte-Ignore: clear the top 8 (tag) bits of a user pointer. */
#define ARM_TBI_USER_MASK       (0xFF00000000000000ULL)
#define VM_USER_STRIP_TBI(_v)   ((typeof (_v))(((uintptr_t)(_v)) &~ (ARM_TBI_USER_MASK)))
#else /* __arm64__ */
#define VM_USER_STRIP_TBI(_v)   (_v)
#endif /* __arm64__ */


#if __arm64__

#if XNU_KERNEL_PRIVATE
/*
 * Strip tag/signature bits from a pointer. Bit 55 selects the half of the
 * address space: when set, the top bits are forced on (kernel range,
 * width from T1SZ_BOOT); when clear, the pointer is masked down to the
 * user range (width from T0SZ_BOOT). T0SZ_BOOT/T1SZ_BOOT come from
 * <arm64/proc_reg.h>, included at the top of this header.
 */
#define VM_KERNEL_STRIP_MASK (-1ULL << (64 - T1SZ_BOOT))
#define VM_USER_STRIP_MASK   (-1ULL >> (T0SZ_BOOT))
#define _VM_KERNEL_STRIP_PTR(_va) ({((_va) & 1ULL << 55) ? ((_va) | VM_KERNEL_STRIP_MASK) : ((_va) & VM_USER_STRIP_MASK);})
#else /* XNU_KERNEL_PRIVATE */

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#define VM_KERNEL_STRIP_PAC(_v) ((uintptr_t)(ptrauth_strip((void *)(uintptr_t)(_v), ptrauth_key_asia)))
#else /* !ptrauth_calls */
#define VM_KERNEL_STRIP_PAC(_v) (_v)
#endif /* ptrauth_calls */
/* For KEXT, just blow away TBI bits, even if only used for KASAN. */
#define _VM_KERNEL_STRIP_PTR(_v) (VM_KERNEL_STRIP_PAC(_v) | (0xFF00000000000000ULL))
#endif /* XNU_KERNEL_PRIVATE */

#else /* __arm64__ */
#define _VM_KERNEL_STRIP_PTR(_v) (_v)
#endif /* __arm64__ */

#define VM_KERNEL_STRIP_PTR(_va) (_VM_KERNEL_STRIP_PTR((uintptr_t)(_va)))

/* Vestige from the past, kept for retro-compatibility. */
#define VM_KERNEL_STRIP_UPTR(_va) (VM_KERNEL_STRIP_PTR(_va))

/* True when the stripped pointer falls within the kernel VA range. */
#define VM_KERNEL_ADDRESS(_va) \
	((VM_KERNEL_STRIP_PTR(_va) >= VM_MIN_KERNEL_ADDRESS) && \
	(VM_KERNEL_STRIP_PTR(_va) <= VM_MAX_KERNEL_ADDRESS))

#define VM_USER_STRIP_PTR(_v) (VM_USER_STRIP_TBI(_v))

#if DEBUG || DEVELOPMENT || !defined(HAS_APPLE_PAC)

/* Address "permutation" is a plain slide on non-PAC / debug builds. */
#define ML_ADDRPERM(addr, slide) ((addr) + (slide))

#else /* DEBUG || DEVELOPMENT || !defined(HAS_APPLE_PAC) */

/**
 * While these function's implementations are machine specific, due to the need
 * to prevent header file circular dependencies, they need to be externed here
 * for usage in the addrperm macro
 */
__BEGIN_DECLS
vm_offset_t ml_addrperm_pacga(vm_offset_t addr);
__END_DECLS

/* Note: the slide argument is accepted but unused on this path. */
#define ML_ADDRPERM(addr, slide) ml_addrperm_pacga(addr)

#endif /* DEBUG || DEVELOPMENT || !defined(HAS_APPLE_PAC) */

#ifdef MACH_KERNEL_PRIVATE
/*
 * Physical memory is mapped linearly at an offset virtual memory.
 */
extern unsigned long gVirtBase, gPhysBase, gPhysSize;

/*
 * Unsigned subtraction wraps deliberately: one compare rejects addresses
 * both below gPhysBase and at/above gPhysBase + gPhysSize.
 */
#define isphysmem(a)            (((vm_address_t)(a) - gPhysBase) < gPhysSize)
#define physmap_enclosed(a)     isphysmem(a)

/*
 * gPhysBase/Size only represent kernel-managed memory. These globals represent
 * the actual DRAM base address and size as reported by iBoot through the device
 * tree.
 */
#include <stdint.h>
extern uint64_t gDramBase, gDramSize;
#define is_dram_addr(addr)      (((uint64_t)(addr) - gDramBase) < gDramSize)

#endif /* MACH_KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

#if KASAN
/* Increase the stack sizes to account for the redzones that get added to every
 * stack object. (4*4*4096 = 64KB) */
# define KERNEL_STACK_SIZE (4*4*4096)
#elif DEBUG
/*
 * Increase the stack size to account for less efficient use of stack space when
 * compiling with -O0. (2*4*4096 = 32KB)
 */
# define KERNEL_STACK_SIZE (2*4*4096)
#else
/*
 * KERNEL_STACK_MULTIPLIER can be defined externally to get a larger
 * kernel stack size. For example, adding "-DKERNEL_STACK_MULTIPLIER=2"
 * helps avoid kernel stack overflows when compiling with "-O0".
 */
#ifndef KERNEL_STACK_MULTIPLIER
#define KERNEL_STACK_MULTIPLIER (1)
#endif /* KERNEL_STACK_MULTIPLIER */
# define KERNEL_STACK_SIZE (4*4096*KERNEL_STACK_MULTIPLIER)
#endif /* KASAN */

#define INTSTACK_SIZE (4*4096)

#ifdef __arm64__
#define EXCEPSTACK_SIZE (4*4096)
#else
#define FIQSTACK_SIZE (4096)
#endif

#if defined (__arm__)
#define HIGH_EXC_VECTORS ((vm_address_t) 0xFFFF0000)
#endif

/*
 * TODO: We're hardcoding the expected virtual TEXT base here;
 * that gives us an ugly dependency on a linker argument in
 * the make files. Clean this up, so we don't hardcode it
 * twice; this is nothing but trouble.
 */
#if defined (__arm__)
#define VM_KERNEL_LINK_ADDRESS ((vm_address_t) 0x80000000)
#elif defined (__arm64__)
/* VM_KERNEL_LINK_ADDRESS defined in makedefs/MakeInc.def for arm64 platforms */
#else
#error architecture not supported
#endif

#endif /* XNU_KERNEL_PRIVATE */
#endif /* KERNEL */

#endif /* !__ASSEMBLER__ */

#define SWI_SYSCALL 0x80

#endif /* defined (__arm__) || defined (__arm64__) */

#endif /* _MACH_ARM_VM_PARAM_H_ */