/*
 * Copyright (c) 2000-2020 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	mach/vm_param.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Machine independent virtual memory parameters.
 *
 */

#ifndef _MACH_VM_PARAM_H_
#define _MACH_VM_PARAM_H_

#include <mach/machine/vm_param.h>

#ifdef  KERNEL

#ifndef ASSEMBLER
#include <mach/vm_types.h>
#endif  /* ASSEMBLER */

#include <os/base.h>
#include <os/overflow.h>
/*
 *	The machine-independent pages are referred to as PAGES.  A page
 *	is some number of hardware pages, depending on the target machine.
 */

#ifndef ASSEMBLER

#define PAGE_SIZE_64 (unsigned long long)PAGE_SIZE              /* pagesize in addr units */
#define PAGE_MASK_64 (unsigned long long)PAGE_MASK              /* mask for off in page */

/*
 *	Convert addresses to pages and vice versa.  No rounding is used.
 *      The atop_32 and ptoa_32 macros should not be used on 64-bit types;
 *      the atop_64 and ptoa_64 macros should be used instead.
 */

#define atop_32(x) ((uint32_t)(x) >> PAGE_SHIFT)
#define ptoa_32(x) ((uint32_t)(x) << PAGE_SHIFT)
#define atop_64(x) ((uint64_t)(x) >> PAGE_SHIFT)
#define ptoa_64(x) ((uint64_t)(x) << PAGE_SHIFT)

#define atop_kernel(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa_kernel(x) ((vm_address_t)(x) << PAGE_SHIFT)

/*
 *      While the following block is enabled, the legacy atop and ptoa
 *      macros behave correctly.  If it is disabled, they generate
 *      invalid-lvalue errors instead.
 */

#if 1
#define atop(x) ((vm_address_t)(x) >> PAGE_SHIFT)
#define ptoa(x) ((vm_address_t)(x) << PAGE_SHIFT)
#else
#define atop(x) (0UL = 0)
#define ptoa(x) (0UL = 0)
#endif
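
/*
 * Example (illustrative, not part of the original header): assuming 16 KB
 * pages (PAGE_SHIFT == 14), the conversions behave as follows:
 *
 *	atop_64(0x8000) == 2        // two full pages
 *	ptoa_64(3)      == 0xC000   // three pages' worth of bytes
 */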

/*
 *	Page-size rounding macros for the public fixed-width VM types.
 */
#define mach_vm_round_page(x) (((mach_vm_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define mach_vm_trunc_page(x) ((mach_vm_offset_t)(x) & ~((signed)PAGE_MASK))

#define round_page_overflow(in, out) __os_warn_unused(({ \
	        bool __ovr = os_add_overflow(in, (__typeof__(*out))PAGE_MASK, out); \
	        *out &= ~((__typeof__(*out))PAGE_MASK); \
	        __ovr; \
	}))

static inline int OS_WARN_RESULT
mach_vm_round_page_overflow(mach_vm_offset_t in, mach_vm_offset_t *out)
{
	return round_page_overflow(in, out);
}
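
/*
 * Example (illustrative): a minimal sketch of the intended calling pattern,
 * assuming a caller that wants to reject unroundable sizes; the overflow
 * flag must be checked before the rounded value is used:
 *
 *	mach_vm_offset_t rounded;
 *	if (mach_vm_round_page_overflow(size, &rounded)) {
 *	        return KERN_INVALID_ARGUMENT;
 *	}
 */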

#define memory_object_round_page(x) (((memory_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define memory_object_trunc_page(x) ((memory_object_offset_t)(x) & ~((signed)PAGE_MASK))

/*
 *	Rounding macros for the legacy (scalable with the current task's
 *	address space size) VM types.
 */

#define round_page(x) (((vm_offset_t)(x) + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK))
#define trunc_page(x) ((vm_offset_t)(x) & ~((vm_offset_t)PAGE_MASK))

/*
 *	Round off or truncate to the nearest page.  These will work
 *	for either addresses or counts (i.e. 1 byte rounds up to 1 page's
 *	worth of bytes).  The round_page_32 and trunc_page_32 macros
 *      should not be used on 64-bit types; the round_page_64 and
 *      trunc_page_64 macros should be used instead.
 *
 *	These should only be used in the rare cases where the size of the
 *	address or length is hard-coded as 32 or 64 bits.  Otherwise, the
 *	macros associated with the specific VM type should be used.
 */

#define round_page_32(x) (((uint32_t)(x) + PAGE_MASK) & ~((uint32_t)PAGE_MASK))
#define trunc_page_32(x) ((uint32_t)(x) & ~((uint32_t)PAGE_MASK))
#define round_page_64(x) (((uint64_t)(x) + PAGE_MASK_64) & ~((uint64_t)PAGE_MASK_64))
#define trunc_page_64(x) ((uint64_t)(x) & ~((uint64_t)PAGE_MASK_64))

#define round_page_mask_32(x, mask) (((uint32_t)(x) + (mask)) & ~((uint32_t)(mask)))
#define trunc_page_mask_32(x, mask) ((uint32_t)(x) & ~((uint32_t)(mask)))
#define round_page_mask_64(x, mask) (((uint64_t)(x) + (mask)) & ~((uint64_t)(mask)))
#define trunc_page_mask_64(x, mask) ((uint64_t)(x) & ~((uint64_t)(mask)))
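
/*
 * Example (illustrative): assuming 4 KB pages (PAGE_MASK == 0xFFF):
 *
 *	round_page_64(1)      == 0x1000   // 1 byte occupies a whole page
 *	round_page_64(0x1000) == 0x1000   // already aligned, unchanged
 *	trunc_page_64(0x1FFF) == 0x1000   // truncates to the page base
 */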

/*
 *      Enable the following block to find uses of xxx_32 macros that should
 *      be xxx_64.  These macros only work in C code, not C++.  The resulting
 *      binaries are not functional.  Look for invalid lvalue errors in
 *      the compiler output.
 *
 *      Enabling the following block will also find uses of the xxx_64 macros
 *      that have been passed pointers.  The parameters should be cast to an
 *      unsigned long type first.  Look for "invalid operands to binary +"
 *      errors in the compiler output.
 */

#if 0
#undef atop_32
#undef ptoa_32
#undef round_page_32
#undef trunc_page_32
#undef atop_64
#undef ptoa_64
#undef round_page_64
#undef trunc_page_64

#ifndef __cplusplus

#define atop_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define ptoa_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define round_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)

#define trunc_page_32(x) \
    (__builtin_choose_expr (sizeof(x) != sizeof(uint64_t), \
	(*(long *)0), \
	(0UL)) = 0)
#else

#define atop_32(x) (0)
#define ptoa_32(x) (0)
#define round_page_32(x) (0)
#define trunc_page_32(x) (0)

#endif /* ! __cplusplus */

#define atop_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define ptoa_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define round_page_64(x) ((uint64_t)((x) + (uint8_t *)0))
#define trunc_page_64(x) ((uint64_t)((x) + (uint8_t *)0))

#endif

/*
 *	Determine whether an address is page-aligned, or a count is
 *	an exact page multiple.
 */

#define page_aligned(x) (((x) & PAGE_MASK) == 0)

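/*
 * Example (illustrative): assuming 4 KB pages, page_aligned(0x2000) is
 * true, while page_aligned(0x2010) is false; the same macro also verifies
 * that a byte count such as 0x3000 is an exact multiple of the page size.
 */
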
extern vm_size_t        mem_size;               /* 32-bit size of memory - limited by maxmem - deprecated */
extern uint64_t         max_mem;                /* 64-bit size of memory - limited by maxmem */

/*
 * The VM compressor pager uses 32-bit page numbers, so this limits the size
 * of anonymous memory objects to 0xffffffff pages.
 * When we need to allocate a chunk of anonymous memory over that size,
 * we have to allocate more than one chunk.
 */
#define ANON_MAX_PAGES   0xFFFFFFFFULL
#define ANON_MAX_SIZE (ANON_MAX_PAGES << PAGE_SHIFT)
/*
 * Work-around for <rdar://problem/6626493>
 * Break large anonymous memory areas into 128MB chunks to alleviate
 * the cost of copying when copy-on-write is not possible because a small
 * portion of the area is wired.
 */
#define ANON_CHUNK_SIZE (128ULL * 1024 * 1024) /* 128MB */

/*
 * The 'medium' malloc allocator would like its regions
 * to be chunked up into MALLOC_MEDIUM_CHUNK_SIZE chunks
 * and backed by different objects. This avoids contention
 * on a single large object and showed solid improvements on high
 * core-count machines with workloads involving video and graphics processing.
 */
#define MALLOC_MEDIUM_CHUNK_SIZE (8ULL * 1024 * 1024) /* 8 MB */

#ifdef KERNEL_PRIVATE
extern uint64_t         sane_size;              /* Memory size to use for defaults calculations */
#endif /* KERNEL_PRIVATE */

#ifdef  XNU_KERNEL_PRIVATE

#include <kern/debug.h>
#include <vm/vm_memtag.h>

extern uint64_t         mem_actual;             /* 64-bit size of memory - not limited by maxmem */
extern uint64_t         max_mem_actual;         /* Size of physical memory adjusted by maxmem */
extern addr64_t         vm_last_addr;           /* Highest kernel virtual address known to the VM system */
extern addr64_t         first_avail_phys;       /* First available physical address */

extern const vm_offset_t        vm_min_kernel_address;
extern const vm_offset_t        vm_max_kernel_address;

extern vm_offset_t              vm_kernel_stext;
extern vm_offset_t              vm_kernel_etext;
extern vm_offset_t              vm_kernel_slid_base;
extern vm_offset_t              vm_kernel_slid_top;
extern vm_offset_t              vm_kernel_slide;

#if CONFIG_SPTM
typedef struct {
	vm_offset_t unslid_base;
	vm_offset_t unslid_top;
	vm_offset_t slid_base;
	vm_offset_t slid_top;
	vm_offset_t slide;
} vm_image_offsets;

extern vm_image_offsets         vm_sptm_offsets;
extern vm_image_offsets         vm_txm_offsets;
#endif /* CONFIG_SPTM */

extern vm_offset_t              vm_kernel_addrperm;
extern vm_offset_t              vm_kext_base;
extern vm_offset_t              vm_kext_top;
extern vm_offset_t              vm_kernel_base;
extern vm_offset_t              vm_kernel_top;
extern vm_offset_t              vm_hib_base;

extern vm_offset_t              vm_kernel_builtinkmod_text;
extern vm_offset_t              vm_kernel_builtinkmod_text_end;

/**
 * While these functions' implementations are machine-specific, they need to
 * be externed here, to avoid header file circular dependencies, for use in
 * the sliding/unsliding macros.
 */
__BEGIN_DECLS
vm_offset_t ml_static_slide(vm_offset_t vaddr);
vm_offset_t ml_static_unslide(vm_offset_t vaddr);
__END_DECLS

/**
 * Determine whether a given address is an address within a static region (i.e.,
 * coming from TEXT or DATA) that was slid during boot. Addresses of this type
 * should have the slide removed before exposing them to userspace so as to not
 * leak the slide itself to userspace.
 *
 * @param addr The virtual address to check.
 *
 * @return True if the address is a static/slid kernel address, false otherwise.
 */
static inline bool
vm_is_addr_slid(vm_offset_t addr)
{
	const vm_offset_t stripped_addr = (vm_offset_t)VM_KERNEL_STRIP_PTR(addr);
	const bool is_slid_kern_addr =
	    (stripped_addr >= vm_kernel_slid_base) && (stripped_addr < vm_kernel_slid_top);

#if CONFIG_SPTM
	const bool is_slid_sptm_addr =
	    (stripped_addr >= vm_sptm_offsets.slid_base) && (stripped_addr < vm_sptm_offsets.slid_top);

	const bool is_slid_txm_addr =
	    (stripped_addr >= vm_txm_offsets.slid_base) && (stripped_addr < vm_txm_offsets.slid_top);

	return is_slid_kern_addr || is_slid_sptm_addr || is_slid_txm_addr;
#else
	return is_slid_kern_addr;
#endif /* CONFIG_SPTM */
}

#define VM_KERNEL_IS_SLID(_o) (vm_is_addr_slid((vm_offset_t)(_o)))

#define VM_KERNEL_SLIDE(_u) (ml_static_slide((vm_offset_t)(_u)))

/*
 * The following macros are to be used when exposing kernel addresses to
 * userspace via any of the various debug or info facilities that might exist
 * (e.g. stackshot, proc_info syscall, etc.). It is important to understand
 * the goal of each macro and choose the right one depending on what you are
 * trying to do. Misuse of these macros can result in critical data leaks
 * which in turn lead to all sorts of system vulnerabilities. It is invalid to
 * call these macros on a non-kernel address (NULL is allowed).
 *
 * VM_KERNEL_UNSLIDE:
 *     Use this macro when you are exposing an address to userspace which is
 *     *guaranteed* to be a "static" kernel or kext address (i.e. coming from text
 *     or data sections). These are the addresses which get "slid" via ASLR on
 *     kernel or kext load, and it's precisely the slide value we are trying to
 *     protect from userspace.
 *
 * VM_KERNEL_ADDRHIDE:
 *     Use when exposing an address for internal purposes: debugging, tracing,
 *     etc. The address will be unslid if necessary. Other addresses will be
 *     hidden on customer builds, and unmodified on internal builds.
 *
 * VM_KERNEL_ADDRHASH:
 *     Use this macro when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or the
 *     kernel heap. The address will be unslid or hashed as appropriate.
 *
 *
 * ** SECURITY WARNING: The following macros can leak kernel secrets.
 *                      Use *only* in performance *critical* code.
 *
 * VM_KERNEL_ADDRPERM:
 * VM_KERNEL_UNSLIDE_OR_PERM:
 *     Use these macros when exposing a kernel address to userspace on customer
 *     builds. The address can be from the static kernel or kext regions, or the
 *     kernel heap. The address will be unslid or permuted as appropriate.
 *
 * Nesting of these macros should be considered invalid.
 */

#define __DO_UNSLIDE(_v) (ml_static_unslide((vm_offset_t)VM_KERNEL_STRIP_PTR(_v)))

#if DEBUG || DEVELOPMENT
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)VM_KERNEL_STRIP_PTR(_v))
#else
#define VM_KERNEL_ADDRHIDE(_v) (VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_address_t)0)
#endif /* DEBUG || DEVELOPMENT */

#define VM_KERNEL_ADDRHASH(_v) vm_kernel_addrhash((vm_offset_t)(_v))

/*
 * ML_ADDRPERM is defined as a macro that dispatches to the correct machine version.
 * For systems that support the generic ml_addrperm version, the actual slide address is unused.
 */
#define VM_KERNEL_UNSLIDE_OR_PERM(_v) ({ \
	        VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : \
	        VM_KERNEL_ADDRESS(_v) ? (ML_ADDRPERM((uintptr_t)VM_KERNEL_STRIP_UPTR(_v), vm_kernel_addrperm)) : \
	        (vm_offset_t)VM_KERNEL_STRIP_PTR(_v); \
	})

#define VM_KERNEL_UNSLIDE(_v) ({ \
	        VM_KERNEL_IS_SLID(_v) ? __DO_UNSLIDE(_v) : (vm_offset_t)0; \
	})

#define VM_KERNEL_ADDRPERM(_v) VM_KERNEL_UNSLIDE_OR_PERM(_v)

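/*
 * Example (illustrative, with hypothetical record fields): filling a
 * tracing record that is later copied out to userspace:
 *
 *	record->handler = VM_KERNEL_UNSLIDE(handler);   // static text address
 *	record->object  = VM_KERNEL_ADDRHIDE(object);   // may be a heap address
 */
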
#undef mach_vm_round_page
#undef round_page
#undef round_page_32
#undef round_page_64

static inline int
mach_vm_size_unit(mach_vm_size_t size)
{
	uint32_t bits = 64u - (uint32_t)__builtin_clzll((size / 10) | 1);

	return "BKMGTPE"[bits / 10];
}

static inline uint32_t
mach_vm_size_pretty(mach_vm_size_t size)
{
	uint32_t bits = 64u - (uint32_t)__builtin_clzll((size / 10) | 1);

	return (uint32_t)(size >> (bits - bits % 10));
}

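/*
 * Example (illustrative): the two helpers are designed to be used together
 * to pretty-print a size with a unit suffix:
 *
 *	printf("%u%c", mach_vm_size_pretty(size), mach_vm_size_unit(size));
 *
 * For size == 123456789 this prints "117M" (123456789 >> 20 == 117).
 */
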
static inline mach_vm_offset_t
mach_vm_round_page(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline vm_offset_t
round_page(vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline mach_vm_offset_t
round_page_64(mach_vm_offset_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}

static inline uint32_t
round_page_32(uint32_t x)
{
	if (round_page_overflow(x, &x)) {
		panic("overflow detected");
	}
	return x;
}


/*!
 * @typedef vm_packing_params_t
 *
 * @brief
 * Data structure representing the packing parameters for a given packed pointer
 * encoding.
 *
 * @discussion
 * Several data structures wish to pack their pointers into fewer than 64 bits
 * on LP64 in order to save memory.
 *
 * Adopters are supposed to define 3 macros:
 * - @c *_BITS:  number of storage bits used for the packing,
 * - @c *_SHIFT: number of non-significant low bits (expected to be 0),
 * - @c *_BASE:  the base against which to encode.
 *
 * The encoding is a no-op when @c *_BITS is equal to @c __WORDSIZE and
 * @c *_SHIFT is 0.
 *
 *
 * The convenience macro @c VM_PACKING_PARAMS can be used to create
 * a @c vm_packing_params_t structure out of those definitions.
 *
 * It is customary to declare a constant global per scheme, so that debuggers
 * can dynamically decide how to unpack the various schemes.
 *
 *
 * This uses 2 possible schemes (both of which preserve @c NULL):
 *
 * 1. When the storage bits and shift are sufficiently large (strictly more than
 *    VM_KERNEL_POINTER_SIGNIFICANT_BITS), a sign-extension scheme can be used.
 *
 *    This makes it possible to represent any kernel pointer.
 *
 * 2. Else, a base-relative scheme can be used; typical bases are:
 *
 *     - @c KERNEL_PMAP_HEAP_RANGE_START when only pointers to heap (zone)
 *       allocated objects need to be packed,
 *
 *     - @c VM_MIN_KERNEL_AND_KEXT_ADDRESS when pointers to kernel globals also
 *       need this.
 *
 *    When such an encoding is used, @c zone_restricted_va_max() must be taught
 *    about it.
 */
typedef struct vm_packing_params {
	vm_offset_t vmpp_base;
	uint8_t     vmpp_bits;
	uint8_t     vmpp_shift;
	bool        vmpp_base_relative;
} vm_packing_params_t;


/*!
 * @macro VM_PACKING_IS_BASE_RELATIVE
 *
 * @brief
 * Whether the packing scheme with those parameters will be base-relative.
 */
#define VM_PACKING_IS_BASE_RELATIVE(ns) \
	(ns##_BITS + ns##_SHIFT <= VM_KERNEL_POINTER_SIGNIFICANT_BITS)


/*!
 * @macro VM_PACKING_PARAMS
 *
 * @brief
 * Constructs a @c vm_packing_params_t structure based on the convention that
 * macros with the @c _BASE, @c _BITS and @c _SHIFT suffixes have been defined
 * to the proper values.
 */
#define VM_PACKING_PARAMS(ns) \
	(vm_packing_params_t){ \
	    .vmpp_base  = ns##_BASE, \
	    .vmpp_bits  = ns##_BITS, \
	    .vmpp_shift = ns##_SHIFT, \
	    .vmpp_base_relative = VM_PACKING_IS_BASE_RELATIVE(ns), \
	}

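/*
 * Example (illustrative, with a hypothetical FOO_PACKED scheme): a subsystem
 * packing pointers to 64-byte aligned, zone-allocated objects into 31 bits
 * of storage might define:
 *
 *	#define FOO_PACKED_BASE   KERNEL_PMAP_HEAP_RANGE_START
 *	#define FOO_PACKED_BITS   31
 *	#define FOO_PACKED_SHIFT  6
 *
 * and, per the convention above, expose the parameters to debuggers as:
 *
 *	const vm_packing_params_t foo_packing_params =
 *	    VM_PACKING_PARAMS(FOO_PACKED);
 */
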
/**
 * @function vm_pack_pointer
 *
 * @brief
 * Packs a pointer according to the specified parameters.
 *
 * @discussion
 * The convenience @c VM_PACK_POINTER macro can be used to synthesize
 * the @c params argument.
 *
 * @param ptr           The pointer to pack.
 * @param params        The encoding parameters.
 * @returns             The packed pointer.
 */
static inline vm_offset_t
vm_pack_pointer(vm_offset_t ptr, vm_packing_params_t params)
{
	if (ptr != 0) {
		ptr = vm_memtag_canonicalize_kernel(ptr);
	}

	if (!params.vmpp_base_relative) {
		return ptr >> params.vmpp_shift;
	}
	if (ptr) {
		return (ptr - params.vmpp_base) >> params.vmpp_shift;
	}
	return (vm_offset_t)0;
}
#define VM_PACK_POINTER(ptr, ns) \
	vm_pack_pointer(ptr, VM_PACKING_PARAMS(ns))

/**
 * @function vm_unpack_pointer
 *
 * @brief
 * Unpacks a pointer packed with @c vm_pack_pointer().
 *
 * @discussion
 * The convenience @c VM_UNPACK_POINTER macro can be used to synthesize
 * the @c params argument.
 *
 * @param packed        The packed value to decode.
 * @param params        The encoding parameters.
 * @returns             The unpacked pointer.
 */
static inline vm_offset_t
vm_unpack_pointer(vm_offset_t packed, vm_packing_params_t params)
{
	if (!params.vmpp_base_relative) {
		intptr_t addr = (intptr_t)packed;
		addr <<= __WORDSIZE - params.vmpp_bits;
		addr >>= __WORDSIZE - params.vmpp_bits - params.vmpp_shift;
		return vm_memtag_load_tag((vm_offset_t)addr);
	}
	if (packed) {
		return vm_memtag_load_tag((packed << params.vmpp_shift) + params.vmpp_base);
	}
	return (vm_offset_t)0;
}
#define VM_UNPACK_POINTER(packed, ns) \
	vm_unpack_pointer(packed, VM_PACKING_PARAMS(ns))

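/*
 * Example (illustrative): a pack/unpack round-trip for the hypothetical
 * FOO_PACKED scheme sketched above; any pointer within the packable range
 * is recovered exactly:
 *
 *	vm_offset_t packed = VM_PACK_POINTER((vm_offset_t)elem, FOO_PACKED);
 *	assert(VM_UNPACK_POINTER(packed, FOO_PACKED) == (vm_offset_t)elem);
 */
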
/**
 * @function vm_packing_max_packable
 *
 * @brief
 * Returns the largest packable address for the given parameters.
 *
 * @discussion
 * The convenience @c VM_PACKING_MAX_PACKABLE macro can be used to synthesize
 * the @c params argument.
 *
 * @param params        The encoding parameters.
 * @returns             The largest packable pointer.
 */
static inline vm_offset_t
vm_packing_max_packable(vm_packing_params_t params)
{
	if (!params.vmpp_base_relative) {
		return VM_MAX_KERNEL_ADDRESS;
	}

	vm_offset_t ptr = params.vmpp_base +
	    (((1ul << params.vmpp_bits) - 1) << params.vmpp_shift);

	return ptr >= params.vmpp_base ? ptr : VM_MAX_KERNEL_ADDRESS;
}
#define VM_PACKING_MAX_PACKABLE(ns) \
	vm_packing_max_packable(VM_PACKING_PARAMS(ns))


__abortlike
extern void
vm_packing_pointer_invalid(vm_offset_t ptr, vm_packing_params_t params);

/**
 * @function vm_verify_pointer_packable
 *
 * @brief
 * Panics if the specified pointer cannot be packed with the specified
 * parameters.
 *
 * @discussion
 * The convenience @c VM_VERIFY_POINTER_PACKABLE macro can be used to
 * synthesize the @c params argument.
 *
 * The convenience @c VM_ASSERT_POINTER_PACKABLE macro also synthesizes
 * the @c params argument, and is erased when assertions are disabled.
 *
 * @param ptr           The pointer to validate.
 * @param params        The encoding parameters.
 */
static inline void
vm_verify_pointer_packable(vm_offset_t ptr, vm_packing_params_t params)
{
	if (ptr != 0) {
		ptr = vm_memtag_canonicalize_kernel(ptr);
	}

	if (ptr & ((1ul << params.vmpp_shift) - 1)) {
		vm_packing_pointer_invalid(ptr, params);
	}
	if (!params.vmpp_base_relative || ptr == 0) {
		return;
	}
	if (ptr <= params.vmpp_base || ptr > vm_packing_max_packable(params)) {
		vm_packing_pointer_invalid(ptr, params);
	}
}
#define VM_VERIFY_POINTER_PACKABLE(ptr, ns) \
	vm_verify_pointer_packable(ptr, VM_PACKING_PARAMS(ns))

#if DEBUG || DEVELOPMENT
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) \
    VM_VERIFY_POINTER_PACKABLE(ptr, ns)
#else
#define VM_ASSERT_POINTER_PACKABLE(ptr, ns) ((void)(ptr))
#endif

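/*
 * Example (illustrative): validating a pointer before packing it, using the
 * hypothetical FOO_PACKED scheme from above; the assertion variant compiles
 * away on release builds:
 *
 *	VM_ASSERT_POINTER_PACKABLE((vm_offset_t)elem, FOO_PACKED);
 *	foo->foo_packed = VM_PACK_POINTER((vm_offset_t)elem, FOO_PACKED);
 */
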
/**
 * @function vm_packing_verify_range
 *
 * @brief
 * Panics if some pointers in the specified range can't be packed with the
 * specified parameters.
 *
 * @param subsystem     The subsystem requiring the packing.
 * @param min_address   The smallest address of the range.
 * @param max_address   The largest address of the range.
 * @param params        The encoding parameters.
 */
extern void
vm_packing_verify_range(
	const char         *subsystem,
	vm_offset_t         min_address,
	vm_offset_t         max_address,
	vm_packing_params_t params);

#endif  /* XNU_KERNEL_PRIVATE */

extern vm_size_t        page_size;
extern vm_size_t        page_mask;
extern int              page_shift;

/* We need a way to get rid of compiler warnings when we cast from    */
/* a 64-bit value to an address (which may be 32 bits or 64 bits).    */
/* A uintptr_t is used to convert the value to the right precision,   */
/* and then to an address. This macro is also used to convert         */
/* addresses to 32-bit integers, which is a hard failure for a 64-bit */
/* kernel.                                                            */
#include <stdint.h>
#ifndef __CAST_DOWN_CHECK
#define __CAST_DOWN_CHECK

#define CAST_DOWN( type, addr ) \
    ( ((type)((uintptr_t) (addr)/(sizeof(type) < sizeof(uintptr_t) ? 0 : 1))) )

#define CAST_DOWN_EXPLICIT( type, addr )  ( ((type)((uintptr_t) (addr))) )

#endif /* __CAST_DOWN_CHECK */
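
/*
 * Example (illustrative): CAST_DOWN is a compile-time guard; when the
 * destination type is narrower than uintptr_t, the divisor becomes 0 and
 * the build fails, whereas CAST_DOWN_EXPLICIT always performs the
 * (potentially truncating) cast:
 *
 *	uint64_t value = ...;
 *	vm_offset_t addr = CAST_DOWN(vm_offset_t, value);   // OK: same width
 *	uint32_t low = CAST_DOWN_EXPLICIT(uint32_t, value); // explicit truncation
 */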

#endif  /* ASSEMBLER */

#endif  /* KERNEL */

#endif  /* _MACH_VM_PARAM_H_ */