xref: /xnu-12377.61.12/osfmk/vm/vm_memtag.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _MACH_VM_MEMTAG_H_
29 #define _MACH_VM_MEMTAG_H_
30 
31 #ifdef  KERNEL
32 
33 #if __arm64__
34 #include <pexpert/arm64/board_config.h>
35 #endif /* __arm64__ */
36 
37 #include <kern/assert.h>
38 #include <mach/vm_types.h>
39 #include <sys/_types/_caddr_t.h>
40 
41 #if HAS_MTE
42 #define ENABLE_MEMTAG_INTERFACES        1
43 #define ENABLE_MEMTAG_MANIPULATION_API  1
44 #endif
45 
46 #if KASAN_TBI
47 #define ENABLE_MEMTAG_INTERFACES        1
48 #define ENABLE_MEMTAG_MANIPULATION_API  1
49 #endif
50 
51 #if HAS_MTE_EMULATION_SHIMS
52 #define ENABLE_MEMTAG_MANIPULATION_API  1
53 #endif
54 
55 #if HAS_MTE && HAS_MTE_EMULATION_SHIMS
56 #error HAS_MTE and HAS_MTE_EMULATION_SHIMS is not a supported configuration
57 #endif /* HAS_MTE && HAS_MTE_EMULATION_SHIMS */
58 
#if HAS_MTE_EMULATION_SHIMS && KASAN_TBI
/* Fixed typo in diagnostic: was "HAS_MTE_EUMATION_SHIMS". */
#error HAS_MTE_EMULATION_SHIMS and KASAN_TBI is not a supported configuration
#endif /* HAS_MTE_EMULATION_SHIMS && KASAN_TBI */
62 
63 #if defined(ENABLE_MEMTAG_INTERFACES)
64 
65 __BEGIN_DECLS
66 
67 /* Zero-out a tagged memory region performing the minimum set of mandatory checks. */
68 extern void vm_memtag_fast_checked_bzero(void *tagged_buf, vm_size_t n);
69 
70 /*
71  * Given a naked address, extract the metadata from memory and add it to
72  * the correct pointer metadata.
73  */
74 extern vm_map_address_t vm_memtag_load_tag(vm_map_address_t naked_address);
75 
76 /*
77  * Given a tagged pointer and a size, update the associated backing metadata
78  * to match the pointer metadata.
79  */
80 extern void
81 vm_memtag_store_tag(caddr_t tagged_address, vm_size_t size);
82 
83 /* Randomly assign a tag to the current chunk of memory. */
84 extern caddr_t
85 vm_memtag_generate_and_store_tag(caddr_t address, vm_size_t size);
86 
87 /*
88  * When passed a tagged pointer, verify that the pointer metadata matches
89  * the backing storage metadata.
90  */
91 extern void
92 vm_memtag_verify_tag(vm_map_address_t tagged_address);
93 
94 /*
95  * Copy metadata between two mappings whenever we are relocating memory.
96  */
97 extern void
98 vm_memtag_relocate_tags(vm_address_t new_address, vm_address_t old_address, vm_size_t size);
99 
100 /* Temporarily enable/disable memtag checking. */
101 extern void
102 vm_memtag_enable_checking(void);
103 extern void
104 vm_memtag_disable_checking(void);
105 
/*
 * Zeroing operations traditionally happen on large amounts of memory (often pages)
 * and tend to span several different regions with different memtags. Provide
 * variants of bzero that cover both performing this operation without any checking
 * (vm_memtag_bzero_unchecked) and performing it with optimized checking behavior
 * (vm_memtag_bzero_fast_checked).
 */
112 extern void
113 vm_memtag_bzero_fast_checked(void *tagged_buf, vm_size_t n);
114 extern void
115 vm_memtag_bzero_unchecked(void *tagged_buf, vm_size_t n);
116 
117 __END_DECLS
118 
119 #else /* ENABLE_MEMTAG_INTERFACES */
120 
121 #if HAS_MTE
122 #error "vm_memtag interfaces should be defined whenever MTE is available"
123 #endif /* HAS_MTE */
124 
125 #if KASAN_TBI
126 #error "vm_memtag interfaces should be defined whenever KASAN-TBI is enabled"
127 #endif /* KASAN_TBI */
128 
/*
 * Memtag interfaces are compiled out: zeroing degrades to plain bzero and
 * all tag bookkeeping collapses to no-ops / identity macros, so callers
 * compile unchanged without #ifdef guards.
 */
#define vm_memtag_fast_checked_bzero(p, s)      bzero(p, s)
#define vm_memtag_load_tag(a)                   (a)
#define vm_memtag_store_tag(a, s)               do { } while (0)
#define vm_memtag_generate_and_store_tag(a, s)  (a)
#define vm_memtag_relocate_tags(n, o, l)        do { } while (0)
#define vm_memtag_enable_checking()             do { } while (0)
#define vm_memtag_disable_checking()            do { } while (0)
#define vm_memtag_bzero_fast_checked(b, n)      bzero(b, n)
#define vm_memtag_bzero_unchecked(b, n)         bzero(b, n)
138 
139 #endif /* ENABLE_MEMTAG_INTERFACES */
140 
141 #if defined(ENABLE_MEMTAG_MANIPULATION_API)
142 
143 __BEGIN_DECLS
144 /*
145  * Helper functions to manipulate tagged pointers. If more implementors of
146  * the vm_memtag interface beyond KASAN-TBI were to come, then these definitions
147  * should be ifdef guarded properly.
148  */
149 
/*
 * 64-bit tagged-pointer layout: the low 56 bits carry the address proper,
 * the next 4 bits carry the memory tag, and the top 4 bits are left
 * untouched (56 + 4 + 4 == 64).
 */
#define VM_MEMTAG_PTR_SIZE         56
#define VM_MEMTAG_TAG_SIZE          4
#define VM_MEMTAG_UPPER_SIZE        4

/* A memory tag; only the low VM_MEMTAG_TAG_SIZE bits are significant. */
typedef uint8_t vm_memtag_t;

/*
 * Overlay of a pointer viewed either as a raw value or as its
 * (address bits, tag, upper bits) fields.
 *
 * NOTE(review): the field-to-bit mapping relies on the compiler allocating
 * bit-fields low-to-high (bit-field ordering is implementation-defined in C),
 * i.e. the tag is assumed to sit at bits [59:56] — confirm if porting to a
 * different toolchain/ABI.
 */
union vm_memtag_ptr {
	long value;

	struct {
		long ptr_bits:                  VM_MEMTAG_PTR_SIZE;
		vm_memtag_t ptr_tag:            VM_MEMTAG_TAG_SIZE;
		long ptr_upper:                 VM_MEMTAG_UPPER_SIZE;
	};
};
165 
166 static inline vm_map_address_t
vm_memtag_insert_tag(vm_map_address_t naked_ptr,vm_memtag_t tag)167 vm_memtag_insert_tag(vm_map_address_t naked_ptr, vm_memtag_t tag)
168 {
169 	union vm_memtag_ptr p = {
170 		.value = (long)naked_ptr,
171 	};
172 
173 	p.ptr_tag = tag;
174 	return (vm_map_address_t)p.value;
175 }
176 
177 static inline vm_memtag_t
vm_memtag_extract_tag(vm_map_address_t tagged_ptr)178 vm_memtag_extract_tag(vm_map_address_t tagged_ptr)
179 {
180 	union vm_memtag_ptr p = {
181 		.value = (long)tagged_ptr,
182 	};
183 
184 	return p.ptr_tag;
185 }
186 
/*
 * When passed a tagged pointer, replace only the tag bits with their canonical
 * value. Since these are used in a number of frequently called checks
 * (e.g. when packing VM pointers), the following definitions hardcode the
 * tag value to achieve optimal codegen and no external calls.
 */
193 #ifndef __BUILDING_XNU_LIBRARY__
194 #define vm_memtag_canonicalize_kernel(addr)     vm_memtag_insert_tag(addr, 0xF)
195 #else /* __BUILDING_XNU_LIBRARY__ */
196 #define vm_memtag_canonicalize_kernel(addr)     vm_memtag_insert_tag(addr, 0x0)
197 #endif/* __BUILDING_XNU_LIBRARY__ */
198 #define vm_memtag_canonicalize_user(addr)       vm_memtag_insert_tag(addr, 0x0)
199 
200 extern vm_map_address_t
201 vm_memtag_canonicalize(vm_map_t map, vm_map_address_t addr);
202 
203 __END_DECLS
204 
205 #else /* ENABLE_MEMTAG_MANIPULATION_API */
206 
207 #if HAS_MTE
208 #error "vm_memtag manipulation APIs should be defined whenever MTE is available"
209 #endif /* HAS_MTE */
210 
211 #if KASAN_TBI
212 #error "vm_memtag manipulation APIs should be defined whenever KASAN-TBI is enabled"
213 #endif /* KASAN_TBI */
214 
/*
 * Tag-manipulation API is compiled out: insertion and canonicalization are
 * the identity, and extraction reports 0xF — the same hard-coded canonical
 * kernel tag used by vm_memtag_canonicalize_kernel in the enabled build.
 */
#define vm_memtag_insert_tag(p, t)              (p)
#define vm_memtag_extract_tag(p)                (0xF)
#define vm_memtag_canonicalize(m, a)            (a)
#define vm_memtag_canonicalize_user(a)          (a)
#define vm_memtag_canonicalize_kernel(a)        (a)
220 
221 #endif /* ENABLE_MEMTAG_MANIPULATION_API */
222 
223 #endif  /* KERNEL */
224 
225 #endif  /* _MACH_VM_MEMTAG_H_ */
226