/* xref: /xnu-11417.121.6/osfmk/mach/arm/vm_param.h (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650) */
/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * FILE_ID: vm_param.h
 */

/*
 *	ARM machine dependent virtual memory parameters.
 */

36 #ifndef _MACH_ARM_VM_PARAM_H_
37 #define _MACH_ARM_VM_PARAM_H_
38 
39 #if defined (__arm__) || defined (__arm64__)
40 
41 #if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
42 #include <arm64/proc_reg.h>
43 #endif
44 
45 #if defined(KERNEL_PRIVATE) && __ARM_16K_PG__
46 #include <arm64/proc_reg.h>
47 #endif
48 
49 #if !defined (KERNEL) && !defined (__ASSEMBLER__)
50 #include <mach/vm_page_size.h>
51 #endif
52 
#define BYTE_SIZE       8       /* byte size in bits */

#if defined (KERNEL)

#ifndef __ASSEMBLER__

/* PAGE_SHIFT_CONST is the pmap-advertised page shift: fixed at 12 (4K
 * pages) on arm; on arm64 it is a kernel variable chosen at boot. */
#ifdef  __arm__
#define PAGE_SHIFT_CONST        12
#elif defined(__arm64__)
extern int PAGE_SHIFT_CONST;
#else
#error Unsupported arch
#endif

#if defined(KERNEL_PRIVATE) && __ARM_16K_PG__
#define PAGE_SHIFT              ARM_PGSHIFT
#else
#define PAGE_SHIFT              PAGE_SHIFT_CONST
#endif
#define PAGE_SIZE               (1 << PAGE_SHIFT)
#define PAGE_MASK               (PAGE_SIZE-1)

#define VM_PAGE_SIZE            PAGE_SIZE

/* Convert a page count to a byte count. */
#define machine_ptob(x)         ((x) << PAGE_SHIFT)

/*
 * Defined for the purpose of testing the pmap advertised page
 * size; this does not necessarily match the hardware page size.
 */
#define TEST_PAGE_SIZE_16K      (PAGE_SHIFT_CONST == 14)
#define TEST_PAGE_SIZE_4K       (PAGE_SHIFT_CONST == 12)

#endif  /* !__ASSEMBLER__ */

#else /* !KERNEL */

/* Outside the kernel the page geometry comes from the variables
 * exported by <mach/vm_page_size.h>. */
#define PAGE_SHIFT                      vm_page_shift
#define PAGE_SIZE                       vm_page_size
#define PAGE_MASK                       vm_page_mask

#define VM_PAGE_SIZE            vm_page_size

#define machine_ptob(x)         ((x) << PAGE_SHIFT)

#endif /* KERNEL */

/* Largest and smallest page geometry any supported configuration may use. */
#define PAGE_MAX_SHIFT          14
#define PAGE_MAX_SIZE           (1 << PAGE_MAX_SHIFT)
#define PAGE_MAX_MASK           (PAGE_MAX_SIZE-1)

#define PAGE_MIN_SHIFT          12
#define PAGE_MIN_SIZE           (1 << PAGE_MIN_SHIFT)
#define PAGE_MIN_MASK           (PAGE_MIN_SIZE-1)

#define VM_MAX_PAGE_ADDRESS     MACH_VM_MAX_ADDRESS

110 #ifndef __ASSEMBLER__
111 
112 #ifdef  MACH_KERNEL_PRIVATE
113 
114 #define VM32_SUPPORT            1
115 #define VM32_MIN_ADDRESS        ((vm32_offset_t) 0)
116 #define VM32_MAX_ADDRESS        ((vm32_offset_t) (VM_MAX_ADDRESS & 0xFFFFFFFF))
117 
118 #endif /* MACH_KERNEL_PRIVATE */
119 
120 #if defined (__arm__)
121 
122 #define VM_MIN_ADDRESS          ((vm_address_t) 0x00000000)
123 #define VM_MAX_ADDRESS          ((vm_address_t) 0x80000000)
124 
125 /* system-wide values */
126 #define MACH_VM_MIN_ADDRESS     ((mach_vm_offset_t) 0)
127 #define MACH_VM_MAX_ADDRESS     ((mach_vm_offset_t) VM_MAX_ADDRESS)
128 
129 #elif defined (__arm64__)
130 
131 #define VM_MIN_ADDRESS          ((vm_address_t) 0x0000000000000000ULL)
132 #define VM_MAX_ADDRESS          ((vm_address_t) 0x00000000F0000000ULL)
133 
134 /* system-wide values */
135 #define MACH_VM_MIN_ADDRESS_RAW 0x0ULL
136 #if defined(XNU_PLATFORM_MacOSX) || defined(XNU_PLATFORM_DriverKit)
137 #define MACH_VM_MAX_ADDRESS_RAW 0x00007FFFFE000000ULL
138 #else
139 #define MACH_VM_MAX_ADDRESS_RAW 0x0000000FC0000000ULL
140 #endif
141 
142 /*
143  * `MACH_VM_MAX_ADDRESS` is exported to user space, but we don't want this
144  * larger value for `MACH_VM_MAX_ADDRESS` to be exposed outside the kernel.
145  */
146 #if XNU_KERNEL_PRIVATE
147 #if defined(XNU_PLATFORM_iPhoneOS) && EXTENDED_USER_VA_SUPPORT
148 #undef MACH_VM_MAX_ADDRESS_RAW
149 #define MACH_VM_MAX_ADDRESS_RAW 0x00007FFFFE000000ULL
150 #endif /* defined(XNU_PLATFORM_iPhoneOS) && EXTENDED_USER_VA_SUPPORT */
151 /* threshold for allocations to be placed in the large file range */
152 #define VM_LARGE_FILE_THRESHOLD (1ULL << 30)
153 #define MACH_VM_JUMBO_ADDRESS ((mach_vm_offset_t) 0x0000000FC0000000ULL)
154 #endif /* KERNEL_PRIVATE */
155 
156 #define MACH_VM_MIN_ADDRESS     ((mach_vm_offset_t) MACH_VM_MIN_ADDRESS_RAW)
157 #define MACH_VM_MAX_ADDRESS     ((mach_vm_offset_t) MACH_VM_MAX_ADDRESS_RAW)
158 
159 #define MACH_VM_MIN_GPU_CARVEOUT_ADDRESS_RAW 0x0000001000000000ULL
160 #define MACH_VM_MAX_GPU_CARVEOUT_ADDRESS_RAW 0x0000007000000000ULL
161 #define MACH_VM_MIN_GPU_CARVEOUT_ADDRESS     ((mach_vm_offset_t) MACH_VM_MIN_GPU_CARVEOUT_ADDRESS_RAW)
162 #define MACH_VM_MAX_GPU_CARVEOUT_ADDRESS     ((mach_vm_offset_t) MACH_VM_MAX_GPU_CARVEOUT_ADDRESS_RAW)
163 
164 #else /* defined(__arm64__) */
165 #error architecture not supported
166 #endif
167 
168 #define VM_MAP_MIN_ADDRESS      VM_MIN_ADDRESS
169 #define VM_MAP_MAX_ADDRESS      VM_MAX_ADDRESS
170 
171 #ifdef  KERNEL
172 
173 #if defined (__arm__)
174 #define VM_KERNEL_POINTER_SIGNIFICANT_BITS  31
175 #define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) 0x80000000)
176 #define VM_MAX_KERNEL_ADDRESS   ((vm_address_t) 0xFFFEFFFF)
177 #define VM_HIGH_KERNEL_WINDOW   ((vm_address_t) 0xFFFE0000)
178 
179 #elif defined (__arm64__)
180 /*
181  * kalloc() parameters:
182  *
183  * Historically kalloc's underlying zones were power-of-2 sizes, with a
184  * KALLOC_MINSIZE of 16 bytes.  Thus the allocator ensured that
185  * (sizeof == alignof) >= 16 for all kalloc allocations.
186  *
187  * Today kalloc may use zones with intermediate (small) sizes, constrained by
188  * KALLOC_MINSIZE and a minimum alignment, expressed by KALLOC_LOG2_MINALIGN.
189  *
190  * Note that most dynamically allocated data structures contain more than
191  * one int/long/pointer member, so KALLOC_MINSIZE should probably start at 8.
192  */
193 #define TiB(x)                  ((0ULL + (x)) << 40)
194 #define GiB(x)                  ((0ULL + (x)) << 30)
195 #define KALLOC_MINSIZE          16      /* minimum allocation size */
196 #define KALLOC_LOG2_MINALIGN    4       /* log2 minimum alignment */
197 
198 /*
199  * The minimum and maximum kernel address; some configurations may
200  * constrain the address space further.
201  */
202 
203 #if XNU_KERNEL_PRIVATE
204 #if defined(ARM_LARGE_MEMORY)
205 /*
206  * +-----------------------+--------+--------+------------------------+
207  * | 0xffff_fed0_0000_0000 |-1216GB |  832GB | KASAN_SHADOW_MIN       |
208  * | 0xffff_fecf_ffff_ffff |        |        | VM_MAX_KERNEL_ADDRESS  |
209  * +-----------------------+--------+--------+------------------------+
210  * | 0xffff_fe10_0000_0000 |-1984GB |   64GB | PMAP_HEAP_RANGE_START  |
211  * +-----------------------+--------+--------+------------------------+
212  * | 0xffff_fe00_0700_4000 |        |        | VM_KERNEL_LINK_ADDRESS |
213  * +-----------------------+--------+--------+------------------------+
214  * | 0xffff_fe00_0000_0000 |-2048GB |    0GB | VM_MIN_KERNEL_ADDRESS  |
215  * |                       |        |        | LOW_GLOBALS            |
216  * +-----------------------+--------+--------+------------------------+
217  */
218 #define VM_KERNEL_POINTER_SIGNIFICANT_BITS  41
219 
220 // Kernel VA space starts at -2TB
221 #define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) (0ULL - TiB(2)))
222 
223 //   64 GB for kernel cache and globals
224 //  768 GB for heap/general kernel use
225 // 1216 GB left over at the top of the range for KASAN
226 //     Assuming KASAN TBI, this lets us cover down to:
227 //     0 - (1216GB<<KASAN_SCALE) = 0xffff_ed00_0000_0000, or ~19.5TB of VA
228 //     Since we place the DRAM PAPT below VM_MIN_KERNEL_ADDRESS on large
229 //     memory configurations, this configuration works until systems have
230 //     ~17.5TB of DRAM.
231 #define VM_MAX_KERNEL_ADDRESS \
232 	((vm_address_t) (VM_MIN_KERNEL_ADDRESS + GiB(64) + GiB(768) - 1))
233 
234 #else // ARM_LARGE_MEMORY
235 /*
236  * +-----------------------+--------+--------+------------------------+
237  * | 0xffff_fffc_0000_0000 |  -16GB |  112GB | KASAN_SHADOW_MIN       |
238  * |                       |        |        | VM_MAX_KERNEL_ADDRESS  |
239  * +-----------------------+--------+--------+------------------------+
240  * | 0xffff_fff0_0700_4000 |        |        | VM_KERNEL_LINK_ADDRESS |
241  * +-----------------------+--------+--------+------------------------+
242  * | 0xffff_fff0_0000_0000 |  -64GB |   64GB | LOW_GLOBALS            |
243  * |                       |        |        | PMAP_HEAP_RANGE_START  | <= H8
244  * +-----------------------+--------+--------+------------------------+
245  * | 0xffff_ffe0_0000_0000 | -128GB |    0GB | VM_MIN_KERNEL_ADDRESS  | <= H8
246  * +-----------------------+--------+--------+------------------------+
247  * | 0xffff_ffdc_0000_0000 | -144GB |    0GB | VM_MIN_KERNEL_ADDRESS  | >= H9
248  * |                       |        |        | PMAP_HEAP_RANGE_START  | >= H9
249  * +-----------------------+--------+--------+------------------------+
250  */
251 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
252 #define VM_KERNEL_POINTER_SIGNIFICANT_BITS  38
253 #define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) (0ULL - GiB(144)))
254 #else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
255 #define VM_KERNEL_POINTER_SIGNIFICANT_BITS  37
256 #define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) 0xffffffe000000000ULL)
257 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
258 #define VM_MAX_KERNEL_ADDRESS   ((vm_address_t) 0xfffffffbffffffffULL)
259 
260 #endif // ARM_LARGE_MEMORY
261 
262 #else // !XNU_KERNEL_PRIVATE
263 // Inform kexts about largest possible kernel address space
264 #define VM_KERNEL_POINTER_SIGNIFICANT_BITS  41
265 #define VM_MIN_KERNEL_ADDRESS   ((vm_address_t) (0ULL - TiB(2)))
266 #define VM_MAX_KERNEL_ADDRESS   ((vm_address_t) 0xfffffffbffffffffULL)
267 #endif // XNU_KERNEL_PRIVATE
268 #else
269 #error architecture not supported
270 #endif
271 
272 #define VM_MIN_KERNEL_AND_KEXT_ADDRESS  VM_MIN_KERNEL_ADDRESS
273 
#if defined (__arm64__)
/* Top-Byte-Ignore: user pointers may carry a tag in bits 63:56. */
#define ARM_TBI_USER_MASK        (0xFF00000000000000ULL)
#define VM_USER_STRIP_TBI(_v)    ((typeof (_v))(((uintptr_t)(_v)) &~ (ARM_TBI_USER_MASK)))
#else /* __arm64__ */
#define VM_USER_STRIP_TBI(_v)    (_v)
#endif /* __arm64__ */

#if CONFIG_KERNEL_TAGGING
#include <vm/vm_memtag.h>
/*
 * 'strip' in PAC sense, therefore replacing the stripped bits sign extending
 * the sign bit. In kernel space the sign bit is 1, so 0xFF is a valid mask
 * here.
 */
#define VM_KERNEL_STRIP_TAG(_v)         (vm_memtag_canonicalize_kernel((vm_offset_t)_v))
#else /* CONFIG_KERNEL_TAGGING */
#define VM_KERNEL_STRIP_TAG(_v)         (_v)
#endif /* CONFIG_KERNEL_TAGGING */

/*
 * Guard __has_feature: an unguarded `#if __has_feature(...)` is a hard
 * preprocessor error on compilers that do not implement the builtin.
 */
#ifdef __has_feature
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#define VM_KERNEL_STRIP_PAC(_v) (ptrauth_strip((void *)(uintptr_t)(_v), ptrauth_key_asia))
#else /* !ptrauth_calls */
#define VM_KERNEL_STRIP_PAC(_v) (_v)
#endif /* ptrauth_calls */
#else /* !__has_feature */
#define VM_KERNEL_STRIP_PAC(_v) (_v)
#endif /* __has_feature */

/* Strip MTE tag and PAC signature (each is identity when unconfigured). */
#define VM_KERNEL_STRIP_PTR(_va)        ((VM_KERNEL_STRIP_TAG(VM_KERNEL_STRIP_PAC((_va)))))
#define VM_KERNEL_STRIP_UPTR(_va)       ((vm_address_t)VM_KERNEL_STRIP_PTR((uintptr_t)(_va)))
#define VM_KERNEL_ADDRESS(_va)  \
	((VM_KERNEL_STRIP_UPTR(_va) >= VM_MIN_KERNEL_ADDRESS) && \
	 (VM_KERNEL_STRIP_UPTR(_va) <= VM_MAX_KERNEL_ADDRESS))

#define VM_USER_STRIP_PTR(_v)           (VM_USER_STRIP_TBI(_v))

#if DEBUG || DEVELOPMENT || !defined(HAS_APPLE_PAC)

/* No pointer obfuscation available: perturb the address with the slide. */
#define ML_ADDRPERM(addr, slide) ((addr) + (slide))

#else /* DEBUG || DEVELOPMENT || !defined(HAS_APPLE_PAC) */

/**
 * While these functions' implementations are machine specific, due to the need
 * to prevent header file circular dependencies, they need to be externed here
 * for usage in the addrperm macro
 */
__BEGIN_DECLS
vm_offset_t ml_addrperm_pacga(vm_offset_t addr);
__END_DECLS

#define ML_ADDRPERM(addr, slide) ml_addrperm_pacga(addr)

#endif /* DEBUG || DEVELOPMENT || !defined(HAS_APPLE_PAC) */

#ifdef  MACH_KERNEL_PRIVATE
/*
 *	Physical memory is mapped linearly at an offset into the kernel's
 *	virtual address space.
 */
extern unsigned long            gVirtBase, gPhysBase, gPhysSize;

/* True when (a) lies inside kernel-managed physical memory; the unsigned
 * subtraction folds the lower-bound check into a single comparison. */
#define isphysmem(a)            (((vm_address_t)(a) - gPhysBase) < gPhysSize)
#define physmap_enclosed(a)     isphysmem(a)

/*
 * gPhysBase/Size only represent kernel-managed memory. These globals represent
 * the actual DRAM base address and size as reported by iBoot through the device
 * tree.
 */
#include <stdint.h>
extern uint64_t                 gDramBase, gDramSize;
#define is_dram_addr(addr)      (((uint64_t)(addr) - gDramBase) < gDramSize)

#endif /* MACH_KERNEL_PRIVATE */

#ifdef  XNU_KERNEL_PRIVATE

#if KASAN
/* Increase the stack sizes to account for the redzones that get added to every
 * stack object. */
# define KERNEL_STACK_SIZE      (4*4*4096)
#elif DEBUG
/**
 * Increase the stack size to account for less efficient use of stack space when
 * compiling with -O0.
 */
# define KERNEL_STACK_SIZE      (2*4*4096)
#else
/*
 * KERNEL_STACK_MULTIPLIER can be defined externally to get a larger
 * kernel stack size. For example, adding "-DKERNEL_STACK_MULTIPLIER=2"
 * helps avoid kernel stack overflows when compiling with "-O0".
 */
#ifndef KERNEL_STACK_MULTIPLIER
#define KERNEL_STACK_MULTIPLIER (1)
#endif /* KERNEL_STACK_MULTIPLIER */
# define KERNEL_STACK_SIZE      (4*4096*KERNEL_STACK_MULTIPLIER)
#endif /* KASAN || DEBUG */

#define INTSTACK_SIZE           (4*4096)

#ifdef __arm64__
#define EXCEPSTACK_SIZE         (4*4096)
#else
#define FIQSTACK_SIZE           (4096)
#endif

#if defined (__arm__)
#define HIGH_EXC_VECTORS        ((vm_address_t) 0xFFFF0000)
#endif

/*
 * TODO: We're hardcoding the expected virtual TEXT base here;
 * that gives us an ugly dependency on a linker argument in
 * the make files.  Clean this up, so we don't hardcode it
 * twice; this is nothing but trouble.
 */
#if defined (__arm__)
#define VM_KERNEL_LINK_ADDRESS  ((vm_address_t) 0x80000000)
#elif defined (__arm64__)
/* VM_KERNEL_LINK_ADDRESS defined in makedefs/MakeInc.def for arm64 platforms */
#else
#error architecture not supported
#endif

#endif  /* XNU_KERNEL_PRIVATE */
399 #endif  /* KERNEL */
400 
401 #endif  /* !__ASSEMBLER__ */
402 
/* Software interrupt number used for Mach system calls. */
#define SWI_SYSCALL     0x80

405 #endif /* defined (__arm__) || defined (__arm64__) */
406 
407 #endif  /* _MACH_ARM_VM_PARAM_H_ */
408