xref: /xnu-11417.140.69/osfmk/vm/vm_memtag.h (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _MACH_VM_MEMTAG_H_
29 #define _MACH_VM_MEMTAG_H_
30 
31 #ifdef  KERNEL
32 
33 #if __arm64__
34 #include <pexpert/arm64/board_config.h>
35 #endif /* __arm64__ */
36 
37 #include <kern/assert.h>
38 #include <mach/vm_types.h>
39 #include <sys/_types/_caddr_t.h>
40 
41 
#if KASAN_TBI
/*
 * KASAN-TBI is currently the only implementor of the vm_memtag interface
 * (see the note above union vm_memtag_ptr below), so it turns on both the
 * checking interfaces and the pointer-manipulation API.
 */
#define ENABLE_MEMTAG_INTERFACES        1
#define ENABLE_MEMTAG_MANIPULATION_API  1
#endif
46 
47 
48 
49 
#if defined(ENABLE_MEMTAG_INTERFACES)

__BEGIN_DECLS

/*
 * Zero-out a tagged memory region performing the minimum set of mandatory
 * checks. (Compare vm_memtag_bzero_fast_checked below, which serves the
 * same purpose for page-scale zeroing.)
 */
extern void vm_memtag_fast_checked_bzero(void *tagged_buf, vm_size_t n);

/*
 * Given a naked address, extract the metadata from memory and add it to
 * the correct pointer metadata.
 */
extern vm_map_address_t vm_memtag_load_tag(vm_map_address_t naked_address);

/*
 * Given a tagged pointer and a size, update the associated backing metadata
 * to match the pointer metadata.
 */
extern void
vm_memtag_store_tag(caddr_t tagged_address, vm_size_t size);

/*
 * Randomly assign a tag to the current chunk of memory; the returned
 * caddr_t is the address to use from now on (in the disabled build this
 * degenerates to the identity — see the fallback macros below).
 */
extern caddr_t
vm_memtag_generate_and_store_tag(caddr_t address, vm_size_t size);

/*
 * When passed a tagged pointer, verify that the pointer metadata matches
 * the backing storage metadata.
 */
extern void
vm_memtag_verify_tag(vm_map_address_t tagged_address);

/*
 * Copy metadata between two mappings whenever we are relocating memory.
 */
extern void
vm_memtag_relocate_tags(vm_address_t new_address, vm_address_t old_address, vm_size_t size);

/*
 * Temporarily enable/disable memtag checking.
 * NOTE(review): whether these calls nest (e.g. two disables requiring two
 * enables) is not visible from this header — confirm with the implementation.
 */
extern void
vm_memtag_enable_checking(void);
extern void
vm_memtag_disable_checking(void);

/*
 * Zeroing operations traditionally happen on large amounts of memory (often pages)
 * and tend to span over several different regions with different memtags. Implement
 * variants of bzero that capture both performing this operation without checking
 * (vm_memtag_bzero_unchecked) and by optimizing checking behavior (vm_memtag_bzero_fast_checked)
 */
extern void
vm_memtag_bzero_fast_checked(void *tagged_buf, vm_size_t n);
extern void
vm_memtag_bzero_unchecked(void *tagged_buf, vm_size_t n);

__END_DECLS
105 
106 #else /* ENABLE_MEMTAG_INTERFACES */
107 
108 
#if KASAN_TBI
#error "vm_memtag interfaces should be defined whenever KASAN-TBI is enabled"
#endif /* KASAN_TBI */

/*
 * No memtag backend configured: every interface collapses to its untagged
 * equivalent. Tag loads are the identity, tag stores/verifies/relocations
 * are no-ops, and the checked bzero variants fall back to plain bzero().
 */
#define vm_memtag_fast_checked_bzero(p, s)      bzero(p, s)
#define vm_memtag_load_tag(a)                   (a)
#define vm_memtag_store_tag(a, s)               do { } while (0)
#define vm_memtag_generate_and_store_tag(a, s)  (a)
/*
 * vm_memtag_verify_tag() is declared in the enabled branch above but had no
 * fallback here, so callers failed to build with memtag disabled; stub it
 * out like the other side-effect-only interfaces.
 */
#define vm_memtag_verify_tag(a)                 do { } while (0)
#define vm_memtag_relocate_tags(n, o, l)        do { } while (0)
#define vm_memtag_enable_checking()             do { } while (0)
#define vm_memtag_disable_checking()            do { } while (0)
#define vm_memtag_bzero_fast_checked(b, n)      bzero(b, n)
#define vm_memtag_bzero_unchecked(b, n)         bzero(b, n)
122 
123 #endif /* ENABLE_MEMTAG_INTERFACES */
124 
125 #if defined(ENABLE_MEMTAG_MANIPULATION_API)
126 
127 __BEGIN_DECLS
128 /*
129  * Helper functions to manipulate tagged pointers. If more implementors of
130  * the vm_memtag interface beyond KASAN-TBI were to come, then these definitions
131  * should be ifdef guarded properly.
132  */
133 
#define VM_MEMTAG_PTR_SIZE         56
#define VM_MEMTAG_TAG_SIZE          4
#define VM_MEMTAG_UPPER_SIZE        4

typedef uint8_t vm_memtag_t;

/*
 * Overlay describing how a tagged pointer decomposes: the low 56 bits hold
 * the address proper, the next 4 bits carry the memory tag, and the top
 * 4 bits pass through untouched.
 */
union vm_memtag_ptr {
	long value;

	struct {
		long ptr_bits:                  VM_MEMTAG_PTR_SIZE;
		vm_memtag_t ptr_tag:            VM_MEMTAG_TAG_SIZE;
		long ptr_upper:                 VM_MEMTAG_UPPER_SIZE;
	};
};

/*
 * Return `naked_ptr` with its tag field overwritten by `tag`; every other
 * bit (address and upper nibble alike) is preserved.
 */
static inline vm_map_address_t
vm_memtag_insert_tag(vm_map_address_t naked_ptr, vm_memtag_t tag)
{
	union vm_memtag_ptr tagged;

	tagged.value = (long)naked_ptr;
	tagged.ptr_tag = tag;
	return (vm_map_address_t)tagged.value;
}

/* Read the 4-bit tag field out of `tagged_ptr`. */
static inline vm_memtag_t
vm_memtag_extract_tag(vm_map_address_t tagged_ptr)
{
	union vm_memtag_ptr tagged;

	tagged.value = (long)tagged_ptr;
	return tagged.ptr_tag;
}
170 
/*
 * When passed a tagged pointer, strip away only the tag bits, replacing them
 * with their canonical value: 0xF for kernel pointers, 0x0 for user pointers.
 * Since these are used in a number of frequently called checks (e.g. when
 * packing VM pointers), the following definitions hardcode the tag value to
 * achieve optimal codegen and no external calls.
 */
#define vm_memtag_canonicalize_kernel(addr)     vm_memtag_insert_tag(addr, 0xF)
#define vm_memtag_canonicalize_user(addr)       vm_memtag_insert_tag(addr, 0x0)

/*
 * Map-aware variant; presumably selects the kernel or user canonical tag
 * based on `map` — NOTE(review): confirm against the implementation.
 */
extern vm_map_address_t
vm_memtag_canonicalize(vm_map_t map, vm_map_address_t addr);
182 
183 __END_DECLS
184 
185 #else /* ENABLE_MEMTAG_MANIPULATION_API */
186 
187 
#if KASAN_TBI
#error "vm_memtag manipulation APIs should be defined whenever KASAN-TBI is enabled"
#endif /* KASAN_TBI */

/*
 * No memtag backend: tag insertion and canonicalization are the identity,
 * and extraction always reports 0xF — the canonical kernel tag used by
 * vm_memtag_canonicalize_kernel() in the enabled branch.
 */
#define vm_memtag_insert_tag(p, t)              (p)
#define vm_memtag_extract_tag(p)                (0xF)
#define vm_memtag_canonicalize(m, a)            (a)
#define vm_memtag_canonicalize_user(a)          (a)
#define vm_memtag_canonicalize_kernel(a)        (a)
197 
198 #endif /* ENABLE_MEMTAG_MANIPULATION_API */
199 
200 #endif  /* KERNEL */
201 
202 #endif  /* _MACH_VM_MEMTAG_H_ */
203