1 /*
2 * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _MACH_VM_MEMTAG_H_
29 #define _MACH_VM_MEMTAG_H_
30
31 #ifdef KERNEL
32
33 #include <kern/assert.h>
34 #include <mach/vm_types.h>
35
36
37 #if KASAN_TBI
38 #define ENABLE_MEMTAG_INTERFACES 1
39 #endif
40
41 #if defined(ENABLE_MEMTAG_INTERFACES)
42
43 __BEGIN_DECLS
44
/* Zero-out a tagged memory region. */
extern void vm_memtag_bzero(void *tagged_buf, vm_size_t n);

/* Retrieve the tag metadata associated to the target memory address */
extern uint8_t vm_memtag_get_tag(vm_offset_t address);

/*
 * Given a naked address, extract the metadata from memory and add it to
 * the correct pointer metadata.
 */
extern vm_offset_t vm_memtag_fixup_ptr(vm_offset_t naked_address);

/*
 * Given a tagged pointer and a size, update the associated backing metadata
 * to match the pointer metadata.
 */
extern void
vm_memtag_set_tag(vm_offset_t tagged_address, vm_offset_t size);

/*
 * Randomly assign a tag to the current chunk of memory. Memory metadata is
 * not updated yet and must be committed through a call to vm_memtag_set_tag().
 * This helper will implement a basic randomization algorithm that picks a
 * random valid value for the tagging mechanism excluding the current and
 * left/right adjacent metadata value. This approach is fault-conservative and
 * only checks the adjacent memory locations if they fit within the same page.
 */
extern vm_offset_t
vm_memtag_assign_tag(vm_offset_t address, vm_size_t size);

/*
 * When passed a tagged pointer, verify that the pointer metadata matches
 * the backing storage metadata.
 */
extern void
vm_memtag_verify_tag(vm_offset_t tagged_address);

/*
 * Copy metadata between two mappings whenever we are relocating memory.
 */
extern void
vm_memtag_relocate_tags(vm_offset_t new_address, vm_offset_t old_address, vm_offset_t size);

/*
 * Temporarily enable/disable memtag checking.
 */
extern void
vm_memtag_enable_checking(void);
extern void
vm_memtag_disable_checking(void);
95
96
/*
 * Helper functions to manipulate tagged pointers. If more implementors of
 * the vm_memtag interface beyond KASAN-TBI were to come, then these definitions
 * should be ifdef guarded properly.
 */

/* Number of untagged address bits: bits [0, 56) of the pointer. */
#define VM_MEMTAG_PTR_SIZE 56
/* Width in bits of the tag field: bits [56, 60) of the pointer. */
#define VM_MEMTAG_TAG_SIZE 4
/* Remaining bits above the tag: bits [60, 64) of the pointer. */
#define VM_MEMTAG_UPPER_SIZE 4
/* Tag granule: one tag value covers this many bytes of memory. */
#define VM_MEMTAG_BYTES_PER_TAG 16


/*
 * Overlay describing the layout of a 64-bit tagged pointer. Accessing
 * `value` reads/writes the whole pointer; the anonymous struct exposes
 * the address bits, the 4-bit tag, and the upper bits as bitfields.
 * NOTE(review): layout assumes a little-endian, 64-bit `long` target —
 * consistent with the KASAN-TBI (arm64) configurations that enable this.
 */
union vm_memtag_ptr {
	long value;

	struct {
		long ptr_bits: VM_MEMTAG_PTR_SIZE;
		uint8_t ptr_tag: VM_MEMTAG_TAG_SIZE;
		long ptr_upper: VM_MEMTAG_UPPER_SIZE;
	};
};
118
119 static inline vm_offset_t
vm_memtag_add_ptr_tag(vm_offset_t naked_ptr,uint8_t tag)120 vm_memtag_add_ptr_tag(vm_offset_t naked_ptr, uint8_t tag)
121 {
122 union vm_memtag_ptr p = {
123 .value = (long)naked_ptr,
124 };
125
126 p.ptr_tag = tag;
127 return (vm_offset_t)p.value;
128 }
129
130 static inline uint8_t
vm_memtag_extract_tag(vm_offset_t tagged_ptr)131 vm_memtag_extract_tag(vm_offset_t tagged_ptr)
132 {
133 union vm_memtag_ptr p = {
134 .value = (long)tagged_ptr,
135 };
136
137 return p.ptr_tag;
138 }
139
140 __END_DECLS
141
/*
 * When passed a tagged pointer, strip away the tag bits and return the
 * canonical address. Since it's used in a number of frequently called checks
 * (e.g. when packing VM pointers), the following definition hardcodes the
 * tag value to achieve optimal codegen and no external calls.
 * Kernel addresses canonicalize to tag 0xF, user addresses to tag 0x0.
 */
#define vm_memtag_canonicalize_address(addr) vm_memtag_add_ptr_tag(addr, 0xF)
#define vm_memtag_canonicalize_user_address(addr) vm_memtag_add_ptr_tag(addr, 0x0)
150
151 #else /* ENABLE_MEMTAG_INTERFACES */
152
153
#if KASAN_TBI
#error "vm_memtag interfaces should be defined whenever KASAN-TBI is enabled"
#endif /* KASAN_TBI */

/*
 * Fallback stubs used when no memory-tagging implementation is compiled in.
 * These must mirror the full interface above one-for-one so callers build
 * unchanged regardless of KASAN_TBI: value-returning helpers collapse to
 * identity (or the canonical 0xF kernel tag), side-effecting ones to no-ops.
 * Fix: the original set was missing vm_memtag_verify_tag() and
 * vm_memtag_canonicalize_user_address(), breaking the build for their
 * callers whenever KASAN_TBI is disabled.
 */
#define vm_memtag_bzero(p, s)                   bzero(p, s)
#define vm_memtag_get_tag(a)                    (0xF)
#define vm_memtag_fixup_ptr(a)                  (a)
#define vm_memtag_set_tag(a, s)                 do { } while (0)
#define vm_memtag_assign_tag(a, s)              (a)
#define vm_memtag_verify_tag(a)                 do { } while (0)
#define vm_memtag_add_ptr_tag(p, t)             (p)
#define vm_memtag_extract_tag(p)                (0xF)
#define vm_memtag_canonicalize_address(a)       (a)
#define vm_memtag_canonicalize_user_address(a)  (a)
#define vm_memtag_relocate_tags(n, o, l)        do { } while (0)
#define vm_memtag_enable_checking()             do { } while (0)
#define vm_memtag_disable_checking()            do { } while (0)
169
170
171 #endif /* ENABLE_MEMTAG_INTERFACES */
172
173 #endif /* KERNEL */
174
175 #endif /* _MACH_VM_MEMTAG_H_ */
176