1 /*
2 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern.h>
27 #include <vm/pmap.h>
28 #include <vm/pmap_cs.h>
29 #include <kern/zalloc.h>
30 #include <kern/kalloc.h>
31 #include <kern/assert.h>
32 #include <kern/lock_rw.h>
33 #include <libkern/libkern.h>
34 #include <libkern/section_keywords.h>
35 #include <libkern/coretrust/coretrust.h>
36 #include <sys/vm.h>
37 #include <sys/proc.h>
38 #include <sys/codesign.h>
39 #include <sys/code_signing.h>
40 #include <uuid/uuid.h>
41 #include <IOKit/IOBSD.h>
42
43 #if CODE_SIGNING_MONITOR
44 /*
45 * Any set of definitions and functions which are only needed when we have a monitor
46 * environment available should go under this if-guard.
47 */
48
49 #if PMAP_CS_PPL_MONITOR
50 /* All good */
51 #else
52 #error "CODE_SIGNING_MONITOR defined without an available monitor"
53 #endif
54
/* Physical address type used by the pmap layer */
typedef uint64_t pmap_paddr_t;

/* Translate a physical address into its kernel virtual (physical aperture) mapping */
extern vm_map_address_t phystokv(pmap_paddr_t pa);

/* Translate a kernel virtual address into its physical address ("nofail" variant) */
extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);
58
59 #endif /* CODE_SIGNING_MONITOR */
60
61 #if PMAP_CS_PPL_MONITOR
62 /*
63 * We have the Page Protection Layer environment available. All of our artifacts
64 * need to be page-aligned. The PPL will lockdown the artifacts before it begins
65 * the validation.
66 */
67
68 SECURITY_READ_ONLY_EARLY(static bool*) developer_mode_enabled = &ppl_developer_mode_storage;
69
/*
 * Toggle the developer mode state through the PPL, which owns the backing
 * storage for the state when a monitor environment is available.
 */
static void
ppl_toggle_developer_mode(
	bool state)
{
	pmap_toggle_developer_mode(state);
}
76
/*
 * Register a provisioning profile blob with the Page Protection Layer.
 *
 * The raw blob is copied into a freshly allocated, page-aligned payload
 * (pmap_profile_payload_t header followed by the blob) since the PPL locks
 * down the payload pages as part of registration.
 *
 * On success, *profile_obj is set to the PPL's profile validation object,
 * re-derived through kvtophys_nofail()/phystokv() so the returned pointer
 * refers to the physical-aperture mapping of the storage.
 *
 * Returns KERN_SUCCESS, or an error from allocation or PPL registration; the
 * payload is freed on any failure.
 */
static kern_return_t
ppl_register_provisioning_profile(
	const void *profile_blob,
	const size_t profile_blob_size,
	void **profile_obj)
{
	pmap_profile_payload_t *pmap_payload = NULL;
	vm_address_t payload_addr = 0;
	vm_size_t payload_size = 0;
	vm_size_t payload_size_aligned = 0;
	kern_return_t ret = KERN_DENIED;

	/* Overflow-checked total size: payload header + profile blob */
	if (os_add_overflow(sizeof(*pmap_payload), profile_blob_size, &payload_size)) {
		panic("attempted to load a too-large profile: %lu bytes", profile_blob_size);
	}
	payload_size_aligned = round_page(payload_size);

	ret = kmem_alloc(kernel_map, &payload_addr, payload_size_aligned,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for pmap profile payload: %d\n", ret);
		goto exit;
	}

	/* We need to setup the payload before we send it to the PPL */
	pmap_payload = (pmap_profile_payload_t*)payload_addr;

	pmap_payload->profile_blob_size = profile_blob_size;
	memcpy(pmap_payload->profile_blob, profile_blob, profile_blob_size);

	ret = pmap_register_provisioning_profile(payload_addr, payload_size_aligned);
	if (ret == KERN_SUCCESS) {
		/*
		 * Hand back the profile object embedded in the payload, aliased
		 * through the physical aperture.
		 */
		*profile_obj = &pmap_payload->profile_obj_storage;
		*profile_obj = (pmap_cs_profile_t*)phystokv(kvtophys_nofail((vm_offset_t)*profile_obj));
	}

exit:
	/* On any failure, release the payload allocation (if it was made) */
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		kmem_free(kernel_map, payload_addr, payload_size_aligned);
		payload_addr = 0;
		payload_size_aligned = 0;
	}

	return ret;
}
122
123 static kern_return_t
ppl_unregister_provisioning_profile(pmap_cs_profile_t * profile_obj)124 ppl_unregister_provisioning_profile(
125 pmap_cs_profile_t *profile_obj)
126 {
127 kern_return_t ret = KERN_DENIED;
128
129 ret = pmap_unregister_provisioning_profile(profile_obj);
130 if (ret != KERN_SUCCESS) {
131 return ret;
132 }
133
134 /* Get the original payload address */
135 const pmap_profile_payload_t *pmap_payload = profile_obj->original_payload;
136 const vm_address_t payload_addr = (const vm_address_t)pmap_payload;
137
138 /* Get the original payload size */
139 vm_size_t payload_size = pmap_payload->profile_blob_size + sizeof(*pmap_payload);
140 payload_size = round_page(payload_size);
141
142 /* Free the payload */
143 kmem_free(kernel_map, payload_addr, payload_size);
144 pmap_payload = NULL;
145
146 return KERN_SUCCESS;
147 }
148
149 static kern_return_t
ppl_associate_provisioning_profile(pmap_cs_code_directory_t * sig_obj,pmap_cs_profile_t * profile_obj)150 ppl_associate_provisioning_profile(
151 pmap_cs_code_directory_t *sig_obj,
152 pmap_cs_profile_t *profile_obj)
153 {
154 if (pmap_cs_enabled() == false) {
155 return KERN_SUCCESS;
156 }
157
158 return pmap_associate_provisioning_profile(sig_obj, profile_obj);
159 }
160
161 static kern_return_t
ppl_disassociate_provisioning_profile(pmap_cs_code_directory_t * sig_obj)162 ppl_disassociate_provisioning_profile(
163 pmap_cs_code_directory_t *sig_obj)
164 {
165 if (pmap_cs_enabled() == false) {
166 return KERN_SUCCESS;
167 }
168
169 return pmap_disassociate_provisioning_profile(sig_obj);
170 }
171
172 #else
173 /*
174 * We don't have a monitor environment available. This means someone with a kernel
175 * memory exploit will be able to corrupt code signing state. There is not much we
176 * can do here, since this is older HW.
177 */
178
179 static bool developer_mode_storage = true;
180 SECURITY_READ_ONLY_EARLY(static bool*) developer_mode_enabled = &developer_mode_storage;
181
182 static void
xnu_toggle_developer_mode(bool state)183 xnu_toggle_developer_mode(
184 bool state)
185 {
186 /* No extra validation needed within XNU */
187 os_atomic_store(developer_mode_enabled, state, release);
188 }
189
#endif /* !PMAP_CS_PPL_MONITOR */
191
192 #pragma mark Developer Mode
193 /*
194 * AMFI always depends on XNU to extract the state of developer mode on the system. In
195 * cases when we have a monitor, the state is stored within protected monitor memory.
196 */
197
/* Turn on developer mode, dispatching to the monitor when one is present */
void
enable_developer_mode(void)
{
#if PMAP_CS_PPL_MONITOR
	ppl_toggle_developer_mode(true);
#else
	xnu_toggle_developer_mode(true);
#endif
}
207
/* Turn off developer mode, dispatching to the monitor when one is present */
void
disable_developer_mode(void)
{
#if PMAP_CS_PPL_MONITOR
	ppl_toggle_developer_mode(false);
#else
	xnu_toggle_developer_mode(false);
#endif
}
217
218 bool
developer_mode_state(void)219 developer_mode_state(void)
220 {
221 /* Assume true if the pointer isn't setup */
222 if (developer_mode_enabled == NULL) {
223 return true;
224 }
225
226 return os_atomic_load(developer_mode_enabled, acquire);
227 }
228
229 #pragma mark Provisioning Profiles
230 /*
231 * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
232 * profiles when we have a monitor since the monitor needs to independently verify the
233 * profile data as well.
234 */
235
/*
 * Entry point for the provisioning profile garbage collector. This is a no-op
 * unless a code signing monitor is available, since XNU only tracks profiles
 * on behalf of the monitor.
 */
void
garbage_collect_provisioning_profiles(void)
{
#if CODE_SIGNING_MONITOR
	free_provisioning_profiles();
#endif
}
243
244 #if CODE_SIGNING_MONITOR
245
246 /* Structure used to maintain the set of registered profiles on the system */
/* Structure used to maintain the set of registered profiles on the system */
typedef struct _cs_profile {
	/* The UUID of the registered profile */
	uuid_t profile_uuid;

	/* The profile validation object from the monitor */
	void *profile_obj;

	/*
	 * In order to minimize the number of times the same profile would need to be
	 * registered, we allow frequently used profiles to skip the garbage collector
	 * for one pass. Set on registration/association; cleared by the collector.
	 */
	bool skip_collector;

	/* Linked list linkage (all_profiles list, guarded by profiles_lock) */
	SLIST_ENTRY(_cs_profile) link;
} cs_profile_t;
264
/* Linked list head for registered profiles */
static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);

/* Lock group and read-write lock guarding the profile set above */
LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
decl_lck_rw_data(, profiles_lock);
271
/*
 * One-time initialization for provisioning profile management: verifies that
 * CoreTrust is loaded, sets up the profile-set lock, and lets the PPL perform
 * its own initialization when a monitor is present.
 */
void
initialize_provisioning_profiles(void)
{
	/* Ensure the CoreTrust kernel extension has loaded */
	if (coretrust == NULL) {
		panic("coretrust interface not available");
	}

	/* Initialize the provisioning profiles lock */
	lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
	printf("initialized XNU provisioning profile data\n");

#if PMAP_CS_PPL_MONITOR
	pmap_initialize_provisioning_profiles();
#endif
}
288
289 static cs_profile_t*
search_for_profile_uuid(const uuid_t profile_uuid)290 search_for_profile_uuid(
291 const uuid_t profile_uuid)
292 {
293 cs_profile_t *profile = NULL;
294
295 /* Caller is required to acquire the lock */
296 lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);
297
298 SLIST_FOREACH(profile, &all_profiles, link) {
299 if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
300 return profile;
301 }
302 }
303
304 return NULL;
305 }
306
/*
 * Register a provisioning profile with the system.
 *
 * Registers the blob with the monitor and tracks the resulting validation
 * object in the global profile set, keyed by UUID. Registering a UUID that is
 * already present returns KERN_ALREADY_IN_SET and marks the existing entry so
 * it skips the next garbage collection pass.
 */
kern_return_t
register_provisioning_profile(
	const uuid_t profile_uuid,
	const void *profile_blob,
	const size_t profile_blob_size)
{
	cs_profile_t *profile = NULL;
	void *monitor_profile_obj = NULL;
	kern_return_t ret = KERN_DENIED;

	/*
	 * Allocate storage for the profile wrapper object before taking the lock,
	 * since Z_WAITOK allocations may block.
	 */
	profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
	assert(profile != NULL);

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Check to make sure this isn't a duplicate UUID */
	cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
	if (dup_profile != NULL) {
		/* This profile might be used soon -- skip garbage collector */
		dup_profile->skip_collector = true;

		ret = KERN_ALREADY_IN_SET;
		goto exit;
	}

#if PMAP_CS_PPL_MONITOR
	ret = ppl_register_provisioning_profile(profile_blob, profile_blob_size, &monitor_profile_obj);
#endif

	if (ret == KERN_SUCCESS) {
		/* Copy in the profile UUID */
		uuid_copy(profile->profile_uuid, profile_uuid);

		/* Setup the monitor's profile object */
		profile->profile_obj = monitor_profile_obj;

		/* This profile might be used soon -- skip garbage collector */
		profile->skip_collector = true;

		/* Insert at the head of the profile set */
		SLIST_INSERT_HEAD(&all_profiles, profile, link);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	/* On any failure (including duplicates) the unused wrapper is freed */
	if (ret != KERN_SUCCESS) {
		/* Free the profile wrapper object */
		kfree_type(cs_profile_t, profile);
		profile = NULL;

		if (ret != KERN_ALREADY_IN_SET) {
			printf("unable to register profile with monitor: %d\n", ret);
		}
	}

	return ret;
}
368
/*
 * Associate a registered profile (looked up by UUID) with a code signature
 * object through the monitor.
 *
 * Returns KERN_NOT_FOUND when no profile with the given UUID is registered,
 * KERN_SUCCESS on successful association, or the monitor's error otherwise.
 */
kern_return_t
associate_provisioning_profile(
	void *monitor_sig_obj,
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_DENIED;

	/* Lock the profile set as shared */
	lck_rw_lock_shared(&profiles_lock);

	/* Search for the provisioning profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		ret = KERN_NOT_FOUND;
		goto exit;
	}

#if PMAP_CS_PPL_MONITOR
	ret = ppl_associate_provisioning_profile(monitor_sig_obj, profile->profile_obj);
#endif

	if (ret == KERN_SUCCESS) {
		/*
		 * This seems like an active profile -- let it skip the garbage collector on
		 * the next pass. We can modify this field even though we've only taken a shared
		 * lock as in this case we're always setting it to a fixed value.
		 */
		profile->skip_collector = true;
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_shared(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		printf("unable to associate profile: %d\n", ret);
	}
	return ret;
}
409
410 kern_return_t
disassociate_provisioning_profile(void * monitor_sig_obj)411 disassociate_provisioning_profile(
412 void *monitor_sig_obj)
413 {
414 kern_return_t ret = KERN_DENIED;
415
416 #if PMAP_CS_PPL_MONITOR
417 ret = ppl_disassociate_provisioning_profile(monitor_sig_obj);
418 #endif
419
420 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
421 printf("unable to disassociate profile: %d\n", ret);
422 }
423 return ret;
424 }
425
/*
 * Unregister a single profile from the monitor. Returns KERN_SUCCESS when the
 * profile was released, or KERN_FAILURE when the monitor refused because the
 * profile is still in use; any other error panics (see below).
 */
static kern_return_t
unregister_provisioning_profile(
	cs_profile_t *profile)
{
	kern_return_t ret = KERN_DENIED;

#if PMAP_CS_PPL_MONITOR
	ret = ppl_unregister_provisioning_profile(profile->profile_obj);
#endif

	/*
	 * KERN_FAILURE represents the case when the unregistration failed because the
	 * monitor noted that the profile was still being used. Other than that, there
	 * is no other error expected out of this interface. In fact, there is no easy
	 * way to deal with other errors, as the profile state may be corrupted. If we
	 * see a different error, then we panic.
	 */
	if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
		panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
	}

	return ret;
}
449
/*
 * Garbage-collect registered profiles.
 *
 * Walks the profile set and unregisters every profile that did not get used
 * since the last pass. Recently used profiles (skip_collector set) survive one
 * pass with the flag cleared; profiles the monitor reports as still in use
 * are simply left in the set for a later pass.
 */
void
free_provisioning_profiles(void)
{
	kern_return_t ret = KERN_DENIED;
	cs_profile_t *profile = NULL;
	cs_profile_t *temp_profile = NULL;

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* SAFE variant: the current entry may be removed during iteration */
	SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
		if (profile->skip_collector == true) {
			profile->skip_collector = false;
			continue;
		}

		/* Attempt to unregister this profile from the system */
		ret = unregister_provisioning_profile(profile);
		if (ret == KERN_SUCCESS) {
			/* Remove the profile from the profile set */
			SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);

			/* Free the memory consumed for the profile wrapper object */
			kfree_type(cs_profile_t, profile);
			profile = NULL;
		}
	}

	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);
}
481
482 #endif /* CODE_SIGNING_MONITOR */
483