/* xref: /xnu-12377.81.4/osfmk/arm64/mte_xnu.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796) */
/*
 * Copyright (c) 2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
28 
29 #ifndef _ARM64_MTE_XNU_H_
30 #define _ARM64_MTE_XNU_H_
31 
32 #if HAS_MTE
33 
34 #if XNU_KERNEL_PRIVATE
35 
36 #include <sys/types.h>
37 #include <machine/static_if.h>
38 #include <arm64/mte.h>
39 
40 __BEGIN_DECLS
41 
42 __options_closed_decl(mte_config_t, uint16_t, {
43 	/* Disable MTE */
44 	MTE_DISABLE                       = 0x0000,         /* Disable MTE tag checking */
45 	MTE_KERNEL_ENABLE                 = 0x0001,         /* Enable MTE tag checking in the kernel */
46 	MTE_KERNEL_ENABLE_PURE_DATA       = 0x0002,         /* Extend MTE tag checking to pure data in the kernel */
47 	MTE_USER_ENABLE                   = 0x0004,         /* Enable MTE tag checking in userspace */
48 	MTE_USER_FORCE_ENABLE_ALL         = 0x0040,         /* Force enable MTE on every userland process */
49 	MTE_DEBUG_TCO_STATE               = 0x0080,         /* Enable TCO debugging */
50 	MTE_PANIC_ON_NON_CANONICAL_PARAM  = 0x0100,         /* Panic whenever a non canonical address is passed to APIs expecting otherwise */
51 	MTE_PANIC_ON_ASYNC_FAULT          = 0x0200,         /* Panic whenever a tag check fault is detected in an async path */
52 });
53 
#if DEVELOPMENT || DEBUG

/* We keep this for retro-compatibility with code, should be cleaned up. */
extern bool is_mte_enabled;

/* Configuration applied when nothing overrides it. */
#define MTE_CONFIG_DEFAULT      (MTE_KERNEL_ENABLE | MTE_USER_ENABLE)

STATIC_IF_KEY_DECLARE_TRUE(mte_config_kern_enabled);
STATIC_IF_KEY_DECLARE_FALSE(mte_config_kern_data_enabled);
STATIC_IF_KEY_DECLARE_TRUE(mte_config_user_enabled);
/* NOTE(review): no accessor macro is defined for this key in this header — presumably consumed elsewhere; confirm. */
STATIC_IF_KEY_DECLARE_FALSE(mte_config_user_data_enabled);
STATIC_IF_KEY_DECLARE_FALSE(mte_config_force_all_enabled);
STATIC_IF_KEY_DECLARE_FALSE(mte_debug_tco_state);
STATIC_IF_KEY_DECLARE_FALSE(mte_panic_on_non_canonical);
STATIC_IF_KEY_DECLARE_FALSE(mte_panic_on_async_fault);

#define mte_kern_enabled()               probable_static_if(mte_config_kern_enabled)
#define mte_kern_data_enabled()          improbable_static_if(mte_config_kern_data_enabled)
#define mte_user_enabled()               probable_static_if(mte_config_user_enabled)
#define mte_force_all_enabled()          improbable_static_if(mte_config_force_all_enabled)
/*
 * PSTATE.TCO sounds mellow, but can be evil. Tag Check Override is meant to be
 * a way to briefly disable tag checking during a trusted path. It must not extend
 * past the path, into unexpected callee/callers. For this reason, we should not
 * incentivize its use. This option adds some enforcement that TCO is in the state
 * we expect whenever we try to manipulate it. We intentionally do not implement
 * any form of save and restore of its state. We don't enable this on release as
 * reading TCO is a slow operation and would impact performance.
 */
#define mte_debug_tco_state()            improbable_static_if(mte_debug_tco_state)
/*
 * Internal VM state is maintained in canonical form, therefore any non load/store
 * operation (e.g. locate an entry within a vm_map) must happen in canonical form.
 * For a selected number of user/kernel VM APIs, we strip addresses of metadata on
 * behalf of the user. These are APIs that are normally called by consumers on the
 * result of e.g. malloc()'ed addresses (which would now be tagged under MTE) and
 * therefore we strip the address on behalf of the consumer. "Destructive" APIs
 * and internal VM APIs, though, should never receive a tagged address. This option
 * turns on panic'ing (rather than gracefully failing) for this scenario, to help
 * squash away bugs.
 */
#define mte_panic_on_non_canonical()     improbable_static_if(mte_panic_on_non_canonical)
/*
 * Software asynchronous tag check faults can be a nightmare to triage. Until we
 * get better telemetry, allow triggering a panic rather than just killing the
 * victim task.
 */
#define mte_panic_on_async_fault()       improbable_static_if(mte_panic_on_async_fault)
#else /* DEVELOPMENT || DEBUG */
/* On RELEASE the configuration is fixed at compile time. */
#define mte_kern_enabled()                (true)
#define mte_kern_data_enabled()           (false)
#define mte_user_enabled()                (true)
#define mte_force_all_enabled()           (false)
#define mte_debug_tco_state()             (false)
#define mte_panic_on_non_canonical()      (false)
#define mte_panic_on_async_fault()        (false)
#endif /* DEVELOPMENT || DEBUG */
111 
112 /*
113  * Generate a random tag out of default best effort exclude mask.
114  * Assign the generated tag to both pointer and backing storage.
115  */
116 extern caddr_t mte_generate_and_store_tag(caddr_t target, size_t size) __attribute__((overloadable));
117 
118 /*
119  * Generate a random tag out of a caller supplied exclude mask.
120  * Assign the generated tag to both pointer and backing storage.
121  */
122 extern caddr_t mte_generate_and_store_tag(caddr_t target, size_t size, mte_exclude_mask_t mask) __attribute__((overloadable));
123 
124 /*
125  * Disable tag checking over a user task, in order to support MTE soft mode.
126  */
127 extern void mte_disable_user_checking(task_t);
128 
/*
 * MTE bulk operations.
 * These functions read and set (potentially) non-identical sequences of tags
 * over a given range. They leverage STGM/LDGM, and therefore require 256-byte
 * alignment.
 *
 * They take as parameters the VA the operation should start from, the amount
 * of memory to cover, the source/destination of the tags in units of
 * mte_bulk_taglist_t, and the size of that source/destination buffer. The
 * buffer is expected to exactly match the amount of tags to be read/written.
 */
typedef uint64_t mte_bulk_taglist_t;
141 
142 /* LDGM and STGM are privileged instructions. */
143 static inline void
mte_store_tag_256(caddr_t addr,mte_bulk_taglist_t tag_list)144 mte_store_tag_256(caddr_t addr, mte_bulk_taglist_t tag_list)
145 {
146 	__asm__ __volatile__ ("stgm %0, [%1]" : "+r" (tag_list) : "r" (addr) : "memory");
147 }
148 
149 static inline mte_bulk_taglist_t
mte_load_tag_256(caddr_t addr)150 mte_load_tag_256(caddr_t addr)
151 {
152 	mte_bulk_taglist_t tag_list = 0;
153 	__asm__ __volatile__ ("ldgm %0, [%1]" : "+r" (tag_list) : "r" (addr) : "memory");
154 
155 	return tag_list;
156 }
157 
/*
 * Number of mte_bulk_taglist_t elements needed to hold the tags covering
 * `size` bytes, and a convenience declarator for such a buffer.
 * The expansion is fully parenthesized so the embedded division cannot
 * mis-associate with operators at the use site.
 */
#define MTE_BULK_TAGLIST_BUF_SIZE(size)         (MTE_SIZE_TO_ATAG_STORAGE(size) / sizeof (mte_bulk_taglist_t))
#define MTE_BULK_DECLARE_TAGLIST(name, size)    mte_bulk_taglist_t name[MTE_BULK_TAGLIST_BUF_SIZE(size)]
160 
161 extern void mte_bulk_read_tags(caddr_t va, size_t va_size, mte_bulk_taglist_t * buffer, size_t buf_size);
162 extern void mte_bulk_write_tags(caddr_t va, size_t va_size, mte_bulk_taglist_t * buffer, size_t buf_size);
163 
164 /* Copy tags from one va to the other. Mappings are expected to target different physical addresses */
165 extern void mte_copy_tags(caddr_t dest, caddr_t source, vm_size_t size);
166 
167 /* If panic_on_canonical is enabled, report a detected non-canonical address where unexpected */
168 extern void mte_report_non_canonical_address(caddr_t address, vm_map_t map, const char *location);
169 
170 /* MTE exceptions */
171 extern void mte_guard_ast(thread_t thread, mach_exception_data_type_t code, mach_exception_data_type_t subcode);
172 extern void mte_synthesize_async_tag_check_fault(thread_t thread, vm_map_t map);
173 
174 
175 #endif /* XNU_KERNEL_PRIVATE */
176 
177 __END_DECLS
178 
179 #endif /* HAS_MTE */
180 
181 #endif /* _ARM64_MTE_XNU_H_ */
182