1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifdef KERNEL_PRIVATE
30 #ifndef _KERN_ZALLOC_RO_SHIM_
31 #define _KERN_ZALLOC_RO_SHIM_
32
33 #include <os/atomic_private.h>
34 #include <kern/zalloc.h>
35
36 /*
37 * This file is provided as a help to shim adoption of read only zones
 * to provide an A/B comparison driven by a boot-arg.
39 *
40 * In affected files:
41 *
42 * 1. define ALLOW_ZALLOC_RO_SHIM to allow selective RO/non-RO shimming,
43 * and #include <kern/zalloc_ro_shim.h>
44 *
45 * 2. based on typically a boot-arg, decide to pass ZC_READONLY to
46 * zone_create_ro():
47 *
48 * - if ZC_READONLY is absent, then a regular zone is made,
49 * - if ZC_READONLY is passed, then the zone will be read-only and used as
50 * such.
51 *
52 * if ALLOW_ZALLOC_RO_SHIM is not defined, then even when ZC_READONLY
53 * isn't passed, the zone will be forcefully read only.
54 *
55 * 3. in your code, use zone_id_shim_t instead of `zone_id_t` while you still
56 * want to use the RO/non-RO shim.
57 *
58 *
59 * In practice this means that the shimmed code when creating the zone
60 * will look a little like this, and nothing else is otherwise affected
61 * in source:
62 *
63 * <code>
 *     // do this in any translation unit using {zalloc,zfree}_ro_* APIs
65 * #if ALLOW_ZALLOC_RO_SHIM
66 * #include <kern/zalloc_ro_shim.h>
67 * #endif
68 *
69 * SECURITY_READ_ONLY_LATE(zone_id_shim_t) my_zone_id;
70 *
71 * void
72 * some_init_code(void)
73 * {
74 * zone_create_flags_t flags = ZC_NONE;
75 *
76 * #if ALLOW_ZALLOC_RO_SHIM
77 * int use_ro = 0;
78 * PE_parse_boot_arg("my_zone_make_ro", &use_ro, sizeof(use_ro));
79 * if (use_ro) {
80 * flags |= ZC_READONLY;
81 * }
82 * #endif
83 *
84 * my_zone_id = zone_create_ro("my zone", sizeof(struct my_type),
85 * flags, ZC_RO_MY_ZONE_ID);
86 *
87 * // ... more code ...
88 * }
89 * </code>
90 */
91
#ifndef ALLOW_ZALLOC_RO_SHIM
/*
 * Shim disabled: the handle is just a real read-only zone ID and every
 * zalloc_ro_* call below resolves to the genuine implementation.
 */
typedef zone_id_t zone_id_shim_t;
#else
/*
 * Shim enabled: the handle holds either a zone pointer (regular zone,
 * ZC_READONLY not passed) or a read-only zone ID (ZC_READONLY passed).
 * The two cases are discriminated by value range via `zval`,
 * see zone_id_shim_is_ro().
 */
typedef union {
	zone_t zone;            /* regular zone pointer */
	zone_id_t zid;          /* read-only zone ID */
	unsigned long zval;     /* raw value, used to tell the two apart */
} zone_id_shim_t;
100
/*
 * Returns true when the shim handle designates a read-only zone,
 * i.e. it holds a zone ID rather than a zone pointer.
 *
 * Discrimination is by value range: zone IDs are small integers that
 * fit in 16 bits, while a zone_t is a kernel pointer and hence always
 * compares above UINT16_MAX.
 */
static inline bool
zone_id_shim_is_ro(zone_id_shim_t z)
{
	return z.zval <= UINT16_MAX;
}
106
107 static inline zone_id_shim_t
zone_create_ro_shimmed(const char * name,vm_size_t size,zone_create_flags_t flags,zone_create_ro_id_t zc_ro_id)108 zone_create_ro_shimmed(
109 const char *name,
110 vm_size_t size,
111 zone_create_flags_t flags,
112 zone_create_ro_id_t zc_ro_id)
113 {
114 zone_id_shim_t z = {};
115
116 if (flags & ZC_READONLY) {
117 z.zid = zone_create_ro(name, size, flags, zc_ro_id);
118 } else {
119 z.zone = zone_create(name, size, flags);
120 }
121
122 return z;
123 }
124 #define zone_create_ro(name, size, flags, zc_ro_id) \
125 zone_create_ro_shimmed(name, size, flags, zc_ro_id)
126
127 static inline void
zalloc_ro_mut_shimmed(zone_id_shim_t zone_id,void * elem,vm_offset_t offset,const void * new_data,vm_size_t new_data_size)128 zalloc_ro_mut_shimmed(
129 zone_id_shim_t zone_id,
130 void *elem,
131 vm_offset_t offset,
132 const void *new_data,
133 vm_size_t new_data_size)
134 {
135 if (zone_id_shim_is_ro(zone_id)) {
136 zalloc_ro_mut(zone_id.zid, elem, offset, new_data, new_data_size);
137 } else {
138 memcpy((void *)((vm_offset_t)elem + offset), new_data, new_data_size);
139 }
140 }
141 #define zalloc_ro_mut(zone_id, elem, offset, new_data, new_data_size) \
142 zalloc_ro_mut_shimmed(zone_id, elem, offset, new_data, new_data_size)
143
144 static inline void
zalloc_ro_clear_shimmed(zone_id_shim_t zone_id,void * elem,vm_offset_t offset,vm_size_t size)145 zalloc_ro_clear_shimmed(
146 zone_id_shim_t zone_id,
147 void *elem,
148 vm_offset_t offset,
149 vm_size_t size)
150 {
151 if (zone_id_shim_is_ro(zone_id)) {
152 zalloc_ro_clear(zone_id.zid, elem, offset, size);
153 } else {
154 bzero((void *)((vm_offset_t)elem + offset), size);
155 }
156 }
157 #define zalloc_ro_clear(zone_id, elem, offset, size) \
158 zalloc_ro_clear_shimmed(zone_id, elem, offset, size)
159
160 static inline void *
zalloc_ro_shimmed(zone_id_shim_t zone_id,zalloc_flags_t flags)161 zalloc_ro_shimmed(
162 zone_id_shim_t zone_id,
163 zalloc_flags_t flags)
164 {
165 if (zone_id_shim_is_ro(zone_id)) {
166 return (zalloc_ro)(zone_id.zid, flags);
167 } else {
168 return (zalloc_flags)(zone_id.zone, flags);
169 }
170 }
171 #undef zalloc_ro
172 #define zalloc_ro(zid, flags) \
173 zalloc_ro_shimmed(zid, flags)
174
175 static inline void
zfree_ro_shimmed(zone_id_shim_t zone_id,void * addr)176 zfree_ro_shimmed(
177 zone_id_shim_t zone_id,
178 void *addr)
179 {
180 if (zone_id_shim_is_ro(zone_id)) {
181 (zfree_ro)(zone_id.zid, addr);
182 } else {
183 (zfree)(zone_id.zone, addr);
184 }
185 }
186 #undef zfree_ro
187 #define zfree_ro(zid, elem) ({ \
188 zone_id_shim_t __zfree_zid = (zid); \
189 zfree_ro_shimmed(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
190 })
191
/*
 * Non-RO leg of the zalloc_ro_mut_atomic() shim: performs the atomic
 * operation encoded by `op` directly on the (writable) location `dst`,
 * mirroring the return convention of the real RO primitive — the *_orig
 * variants return the value held before the operation; XCHG returns the
 * value that was swapped out.
 *
 * `value` is truncated to the operand width selected by `op`
 * (8/16/32/64 bits); all operations use seq_cst ordering.
 */
static inline uint64_t
__zalloc_ro_mut_atomic_shimmed(
	vm_offset_t dst,
	zro_atomic_op_t op,
	uint64_t value)
{
	/*
	 * Expands to the four width variants (8/16/32/64) of one opcode,
	 * mapping ZRO_ATOMIC_<op>_<width> onto os_atomic_<op2>.
	 */
#define __ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(op, op2) \
	case ZRO_ATOMIC_##op##_8: \
	        return os_atomic_##op2((uint8_t *)dst, (uint8_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_16: \
	        return os_atomic_##op2((uint16_t *)dst, (uint16_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_32: \
	        return os_atomic_##op2((uint32_t *)dst, (uint32_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_64: \
	        return os_atomic_##op2((uint64_t *)dst, (uint64_t)value, seq_cst)

	switch (op) {
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(OR, or_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(XOR, xor_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(AND, and_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(ADD, add_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(XCHG, xchg);
	default:
		/* panic() does not return, so the switch needs no fallthrough. */
		panic("%s: Invalid atomic operation: %d", __func__, op);
	}

#undef __ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP
}
220
221 static inline uint64_t
zalloc_ro_mut_atomic_shimmed(zone_id_shim_t zone_id,void * elem,vm_offset_t offset,zro_atomic_op_t op,uint64_t value)222 zalloc_ro_mut_atomic_shimmed(
223 zone_id_shim_t zone_id,
224 void *elem,
225 vm_offset_t offset,
226 zro_atomic_op_t op,
227 uint64_t value)
228 {
229 if (zone_id_shim_is_ro(zone_id)) {
230 return zalloc_ro_mut_atomic(zone_id.zid, elem, offset, op, value);
231 } else {
232 vm_offset_t ptr = (vm_offset_t)elem + offset;
233 return __zalloc_ro_mut_atomic_shimmed((void *)ptr, op, value);
234 }
235 }
236 #define zalloc_ro_mut_atomic(zone_id, elem, offset, op, value) \
237 zalloc_ro_mut_atomic_shimmed(zone_id, elem, offset, op, value)
238
239 /*
240 * Those are macros/wrappers that will be shimmed naturally:
241 * - zalloc_ro_update_elem,
242 * - zalloc_ro_update_field,
243 * - zalloc_ro_clear_field
244 * - zalloc_ro_update_field_atomic
245 */
246 #endif
247 #endif /* _KERN_ZALLOC_RO_SHIM_ */
248 #endif /* KERNEL_PRIVATE */
249