/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifdef KERNEL_PRIVATE
#ifndef _KERN_ZALLOC_RO_SHIM_
#define _KERN_ZALLOC_RO_SHIM_

#define OS_ATOMIC_CONFIG_STARVATION_FREE_ONLY 0
#include <os/atomic_private.h>
#include <kern/zalloc.h>

/*
 * This file is provided to help shim the adoption of read-only zones,
 * so that an A/B comparison can be driven by a boot-arg.
 *
 * In affected files:
 *
 * 1. define ALLOW_ZALLOC_RO_SHIM to allow selective RO/non-RO shimming,
 *    and #include <kern/zalloc_ro_shim.h>
 *
 * 2. decide, typically based on a boot-arg, whether to pass ZC_READONLY to
 *    zone_create_ro():
 *
 *    - if ZC_READONLY is absent, a regular zone is created,
 *    - if ZC_READONLY is passed, the zone will be read-only and used as
 *      such.
 *
 *    If ALLOW_ZALLOC_RO_SHIM is not defined, then even when ZC_READONLY
 *    isn't passed, the zone is forcibly made read-only.
 *
 * 3. in your code, use zone_id_shim_t instead of zone_id_t for as long as
 *    you want to keep the RO/non-RO shim.
 *
 *
 * In practice this means that the zone-creation code, once shimmed, will
 * look a little like this; nothing else in the source is otherwise
 * affected:
 *
 * <code>
 *     // do this in any translation unit using the {zalloc,zfree}_ro_* APIs
 *     #if ALLOW_ZALLOC_RO_SHIM
 *     #include <kern/zalloc_ro_shim.h>
 *     #endif
 *
 *     SECURITY_READ_ONLY_LATE(zone_id_shim_t) my_zone_id;
 *
 *     void
 *     some_init_code(void)
 *     {
 *         zone_create_flags_t flags = ZC_NONE;
 *
 *     #if ALLOW_ZALLOC_RO_SHIM
 *         int use_ro = 0;
 *
 *         PE_parse_boot_argn("my_zone_make_ro", &use_ro, sizeof(use_ro));
 *         if (use_ro) {
 *             flags |= ZC_READONLY;
 *         }
 *     #endif
 *
 *         my_zone_id = zone_create_ro("my zone", sizeof(struct my_type),
 *             flags, ZC_RO_MY_ZONE_ID);
 *
 *         // ... more code ...
 *     }
 * </code>
 */
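
/*
 * When ALLOW_ZALLOC_RO_SHIM is defined, the macros below redirect
 * zalloc_ro(), zalloc_ro_mut(), zalloc_ro_clear(), zfree_ro() and
 * zalloc_ro_mut_atomic() to runtime-dispatched shims, so call sites keep
 * compiling unchanged whether the zone was created read-only or not.
 *
 * As an illustrative sketch (struct my_type comes from the example above;
 * the element contents and the allocation flags are hypothetical), a
 * typical call site keeps looking like:
 *
 * <code>
 *     struct my_type  tmpl = { 0 };
 *     struct my_type *elem;
 *
 *     elem = zalloc_ro(my_zone_id, Z_WAITOK | Z_ZERO);
 *     zalloc_ro_mut(my_zone_id, elem, 0, &tmpl, sizeof(tmpl));
 *     // ... use the element ...
 *     zfree_ro(my_zone_id, elem);
 * </code>
 */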

#ifndef ALLOW_ZALLOC_RO_SHIM
typedef zone_id_t zone_id_shim_t;
#else
/*
 * When shimming is allowed, a zone_id_shim_t holds either a zone ID
 * (read-only case) or a zone pointer (regular case).  Zone IDs are small
 * integers and zone pointers are kernel addresses, so the stored value
 * itself tells the two cases apart.
 */
typedef union {
	zone_t        zone;
	zone_id_t     zid;
	unsigned long zval;
} zone_id_shim_t;

static inline bool
zone_id_shim_is_ro(zone_id_shim_t z)
{
	return z.zval <= UINT16_MAX;
}

static inline zone_id_shim_t
zone_create_ro_shimmed(
	const char             *name,
	vm_size_t               size,
	zone_create_flags_t     flags,
	zone_create_ro_id_t     zc_ro_id)
{
	zone_id_shim_t z = {};

	if (flags & ZC_READONLY) {
		z.zid = zone_create_ro(name, size, flags, zc_ro_id);
	} else {
		z.zone = zone_create(name, size, flags);
	}

	return z;
}
#define zone_create_ro(name, size, flags, zc_ro_id) \
	zone_create_ro_shimmed(name, size, flags, zc_ro_id)

static inline void
zalloc_ro_mut_shimmed(
	zone_id_shim_t          zone_id,
	void                   *elem,
	vm_offset_t             offset,
	const void             *new_data,
	vm_size_t               new_data_size)
{
	if (zone_id_shim_is_ro(zone_id)) {
		zalloc_ro_mut(zone_id.zid, elem, offset, new_data, new_data_size);
	} else {
		memcpy((void *)((vm_offset_t)elem + offset), new_data, new_data_size);
	}
}
#define zalloc_ro_mut(zone_id, elem, offset, new_data, new_data_size) \
	zalloc_ro_mut_shimmed(zone_id, elem, offset, new_data, new_data_size)

static inline void
zalloc_ro_clear_shimmed(
	zone_id_shim_t          zone_id,
	void                   *elem,
	vm_offset_t             offset,
	vm_size_t               size)
{
	if (zone_id_shim_is_ro(zone_id)) {
		zalloc_ro_clear(zone_id.zid, elem, offset, size);
	} else {
		bzero((void *)((vm_offset_t)elem + offset), size);
	}
}
#define zalloc_ro_clear(zone_id, elem, offset, size) \
	zalloc_ro_clear_shimmed(zone_id, elem, offset, size)

static inline void *
zalloc_ro_shimmed(
	zone_id_shim_t          zone_id,
	zalloc_flags_t          flags)
{
	if (zone_id_shim_is_ro(zone_id)) {
		return (zalloc_ro)(zone_id.zid, flags);
	} else {
		return (zalloc_flags)(zone_id.zone, flags);
	}
}
#undef zalloc_ro
#define zalloc_ro(zid, flags) \
	zalloc_ro_shimmed(zid, flags)

static inline void
zfree_ro_shimmed(
	zone_id_shim_t          zone_id,
	void                   *addr)
{
	if (zone_id_shim_is_ro(zone_id)) {
		(zfree_ro)(zone_id.zid, addr);
	} else {
		(zfree)(zone_id.zone, addr);
	}
}
#undef zfree_ro
/* os_ptr_load_and_erase() consumes (clears) the caller's pointer as it is freed */
#define zfree_ro(zid, elem) ({ \
	zone_id_shim_t __zfree_zid = (zid); \
	zfree_ro_shimmed(__zfree_zid, (void *)os_ptr_load_and_erase(elem)); \
})

static inline uint64_t
__zalloc_ro_mut_atomic_shimmed(
	vm_offset_t             dst,
	zro_atomic_op_t         op,
	uint64_t                value)
{
#define __ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(op, op2) \
	case ZRO_ATOMIC_##op##_8: \
	        return os_atomic_##op2((uint8_t *)dst, (uint8_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_16: \
	        return os_atomic_##op2((uint16_t *)dst, (uint16_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_32: \
	        return os_atomic_##op2((uint32_t *)dst, (uint32_t)value, seq_cst); \
	case ZRO_ATOMIC_##op##_64: \
	        return os_atomic_##op2((uint64_t *)dst, (uint64_t)value, seq_cst)

	switch (op) {
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(OR, or_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(XOR, xor_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(AND, and_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(ADD, add_orig);
		__ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP(XCHG, xchg);
	default:
		panic("%s: Invalid atomic operation: %d", __func__, op);
	}

#undef __ZALLOC_RO_MUT_ATOMIC_SHIMMED_OP
}

static inline uint64_t
zalloc_ro_mut_atomic_shimmed(
	zone_id_shim_t          zone_id,
	void                   *elem,
	vm_offset_t             offset,
	zro_atomic_op_t         op,
	uint64_t                value)
{
	if (zone_id_shim_is_ro(zone_id)) {
		return zalloc_ro_mut_atomic(zone_id.zid, elem, offset, op, value);
	} else {
		vm_offset_t ptr = (vm_offset_t)elem + offset;
		return __zalloc_ro_mut_atomic_shimmed(ptr, op, value);
	}
}
#define zalloc_ro_mut_atomic(zone_id, elem, offset, op, value) \
	zalloc_ro_mut_atomic_shimmed(zone_id, elem, offset, op, value)
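
/*
 * A sketch of a shimmed atomic update (struct my_type and its "refcnt"
 * field are hypothetical; only the call shape matters):
 *
 * <code>
 *     zalloc_ro_mut_atomic(my_zone_id, elem,
 *         offsetof(struct my_type, refcnt), ZRO_ATOMIC_ADD_32, 1);
 * </code>
 *
 * With a read-only zone this goes through the kernel's RO-zone mutation
 * path; otherwise it falls back to the plain os_atomic operation above.
 */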

/*
 * These macros/wrappers from <kern/zalloc.h> are shimmed naturally,
 * because they expand in terms of the primitives redirected above
 * (see the sketch after this comment):
 * - zalloc_ro_update_elem,
 * - zalloc_ro_update_field,
 * - zalloc_ro_clear_field,
 * - zalloc_ro_update_field_atomic.
 */
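/*
 * For instance, a call along these lines (the field and value names are
 * illustrative; see <kern/zalloc.h> for the exact wrapper signatures):
 *
 * <code>
 *     zalloc_ro_update_field(my_zone_id, elem, some_field, &new_value);
 * </code>
 *
 * expands in terms of zalloc_ro_mut(), so it picks up the shimmed
 * definition above without any change at the call site.
 */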
#endif /* !ALLOW_ZALLOC_RO_SHIM */
#endif /* _KERN_ZALLOC_RO_SHIM_ */
#endif /* KERNEL_PRIVATE */