1 /*
2 * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern.h>
27 #include <kern/zalloc.h>
28 #include <kern/kalloc.h>
29 #include <kern/assert.h>
30 #include <kern/locks.h>
31 #include <kern/lock_rw.h>
32 #include <libkern/libkern.h>
33 #include <libkern/section_keywords.h>
34 #include <libkern/coretrust/coretrust.h>
35 #include <pexpert/pexpert.h>
36 #include <sys/vm.h>
37 #include <sys/proc.h>
38 #include <sys/codesign.h>
39 #include <sys/code_signing.h>
40 #include <uuid/uuid.h>
41 #include <IOKit/IOBSD.h>
42
43 #if PMAP_CS_PPL_MONITOR
44 /*
45 * The Page Protection Layer implements the PMAP_CS monitor environment which
46 * provides code signing and memory isolation enforcements for data structures which
47 * are critical to ensuring that all code executed on the system is authorized to do
48 * so.
49 *
50 * Unless the data is managed by the PPL itself, XNU needs to page-align everything,
51 * and then reference the memory as read-only.
52 */
53
54 typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
55 extern vm_map_address_t phystokv(pmap_paddr_t pa);
56 extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);
57
58 #pragma mark Initialization
59
void
code_signing_init()
{
	/* No kernel-side setup is required when the PPL monitor is in use */
}
65
66 #pragma mark Developer Mode
67
68 SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = &ppl_developer_mode_storage;
69
70 void
ppl_toggle_developer_mode(bool state)71 ppl_toggle_developer_mode(
72 bool state)
73 {
74 pmap_toggle_developer_mode(state);
75 }
76
77 #pragma mark Code Signing and Provisioning Profiles
78
79 bool
ppl_code_signing_enabled(void)80 ppl_code_signing_enabled(void)
81 {
82 return pmap_cs_enabled();
83 }
84
85 kern_return_t
ppl_register_provisioning_profile(const void * profile_blob,const size_t profile_blob_size,void ** profile_obj)86 ppl_register_provisioning_profile(
87 const void *profile_blob,
88 const size_t profile_blob_size,
89 void **profile_obj)
90 {
91 pmap_profile_payload_t *pmap_payload = NULL;
92 vm_address_t payload_addr = 0;
93 vm_size_t payload_size = 0;
94 vm_size_t payload_size_aligned = 0;
95 kern_return_t ret = KERN_DENIED;
96
97 if (os_add_overflow(sizeof(*pmap_payload), profile_blob_size, &payload_size)) {
98 panic("attempted to load a too-large profile: %lu bytes", profile_blob_size);
99 }
100 payload_size_aligned = round_page(payload_size);
101
102 ret = kmem_alloc(kernel_map, &payload_addr, payload_size_aligned,
103 KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
104 if (ret != KERN_SUCCESS) {
105 printf("unable to allocate memory for pmap profile payload: %d\n", ret);
106 goto exit;
107 }
108
109 /* We need to setup the payload before we send it to the PPL */
110 pmap_payload = (pmap_profile_payload_t*)payload_addr;
111
112 pmap_payload->profile_blob_size = profile_blob_size;
113 memcpy(pmap_payload->profile_blob, profile_blob, profile_blob_size);
114
115 ret = pmap_register_provisioning_profile(payload_addr, payload_size_aligned);
116 if (ret == KERN_SUCCESS) {
117 *profile_obj = &pmap_payload->profile_obj_storage;
118 *profile_obj = (pmap_cs_profile_t*)phystokv(kvtophys_nofail((vm_offset_t)*profile_obj));
119 }
120
121 exit:
122 if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
123 kmem_free(kernel_map, payload_addr, payload_size_aligned);
124 payload_addr = 0;
125 payload_size_aligned = 0;
126 }
127
128 return ret;
129 }
130
131 kern_return_t
ppl_unregister_provisioning_profile(void * profile_obj)132 ppl_unregister_provisioning_profile(
133 void *profile_obj)
134 {
135 pmap_cs_profile_t *ppl_profile_obj = profile_obj;
136 kern_return_t ret = KERN_DENIED;
137
138 ret = pmap_unregister_provisioning_profile(ppl_profile_obj);
139 if (ret != KERN_SUCCESS) {
140 return ret;
141 }
142
143 /* Get the original payload address */
144 const pmap_profile_payload_t *pmap_payload = ppl_profile_obj->original_payload;
145 const vm_address_t payload_addr = (const vm_address_t)pmap_payload;
146
147 /* Get the original payload size */
148 vm_size_t payload_size = pmap_payload->profile_blob_size + sizeof(*pmap_payload);
149 payload_size = round_page(payload_size);
150
151 /* Free the payload */
152 kmem_free(kernel_map, payload_addr, payload_size);
153 pmap_payload = NULL;
154
155 return KERN_SUCCESS;
156 }
157
158 kern_return_t
ppl_associate_provisioning_profile(void * sig_obj,void * profile_obj)159 ppl_associate_provisioning_profile(
160 void *sig_obj,
161 void *profile_obj)
162 {
163 return pmap_associate_provisioning_profile(sig_obj, profile_obj);
164 }
165
166 kern_return_t
ppl_disassociate_provisioning_profile(void * sig_obj)167 ppl_disassociate_provisioning_profile(
168 void *sig_obj)
169 {
170 return pmap_disassociate_provisioning_profile(sig_obj);
171 }
172
173 void
ppl_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])174 ppl_set_compilation_service_cdhash(
175 const uint8_t cdhash[CS_CDHASH_LEN])
176 {
177 pmap_set_compilation_service_cdhash(cdhash);
178 }
179
180 bool
ppl_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])181 ppl_match_compilation_service_cdhash(
182 const uint8_t cdhash[CS_CDHASH_LEN])
183 {
184 return pmap_match_compilation_service_cdhash(cdhash);
185 }
186
187 void
ppl_set_local_signing_public_key(const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])188 ppl_set_local_signing_public_key(
189 const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
190 {
191 return pmap_set_local_signing_public_key(public_key);
192 }
193
194 uint8_t*
ppl_get_local_signing_public_key(void)195 ppl_get_local_signing_public_key(void)
196 {
197 return pmap_get_local_signing_public_key();
198 }
199
200 void
ppl_unrestrict_local_signing_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])201 ppl_unrestrict_local_signing_cdhash(
202 const uint8_t cdhash[CS_CDHASH_LEN])
203 {
204 pmap_unrestrict_local_signing(cdhash);
205 }
206
207 vm_size_t
ppl_managed_code_signature_size(void)208 ppl_managed_code_signature_size(void)
209 {
210 return pmap_cs_blob_limit;
211 }
212
213 kern_return_t
ppl_register_code_signature(const vm_address_t signature_addr,const vm_size_t signature_size,const vm_offset_t code_directory_offset,const char * signature_path,void ** sig_obj,vm_address_t * ppl_signature_addr)214 ppl_register_code_signature(
215 const vm_address_t signature_addr,
216 const vm_size_t signature_size,
217 const vm_offset_t code_directory_offset,
218 const char *signature_path,
219 void **sig_obj,
220 vm_address_t *ppl_signature_addr)
221 {
222 pmap_cs_code_directory_t *cd_entry = NULL;
223
224 /* PPL doesn't care about the signature path */
225 (void)signature_path;
226
227 kern_return_t ret = pmap_cs_register_code_signature_blob(
228 signature_addr,
229 signature_size,
230 code_directory_offset,
231 (pmap_cs_code_directory_t**)sig_obj);
232
233 if (ret != KERN_SUCCESS) {
234 return ret;
235 }
236 cd_entry = *((pmap_cs_code_directory_t**)sig_obj);
237
238 if (ppl_signature_addr) {
239 *ppl_signature_addr = (vm_address_t)cd_entry->superblob;
240 }
241
242 return KERN_SUCCESS;
243 }
244
245 kern_return_t
ppl_unregister_code_signature(void * sig_obj)246 ppl_unregister_code_signature(
247 void *sig_obj)
248 {
249 return pmap_cs_unregister_code_signature_blob(sig_obj);
250 }
251
252 kern_return_t
ppl_verify_code_signature(void * sig_obj)253 ppl_verify_code_signature(
254 void *sig_obj)
255 {
256 return pmap_cs_verify_code_signature_blob(sig_obj);
257 }
258
259 kern_return_t
ppl_reconstitute_code_signature(void * sig_obj,vm_address_t * unneeded_addr,vm_size_t * unneeded_size)260 ppl_reconstitute_code_signature(
261 void *sig_obj,
262 vm_address_t *unneeded_addr,
263 vm_size_t *unneeded_size)
264 {
265 return pmap_cs_unlock_unneeded_code_signature(
266 sig_obj,
267 unneeded_addr,
268 unneeded_size);
269 }
270
271 #pragma mark Address Spaces
272
273 kern_return_t
ppl_associate_code_signature(pmap_t pmap,void * sig_obj,const vm_address_t region_addr,const vm_size_t region_size,const vm_offset_t region_offset)274 ppl_associate_code_signature(
275 pmap_t pmap,
276 void *sig_obj,
277 const vm_address_t region_addr,
278 const vm_size_t region_size,
279 const vm_offset_t region_offset)
280 {
281 return pmap_cs_associate(
282 pmap,
283 sig_obj,
284 region_addr,
285 region_size,
286 region_offset);
287 }
288
289 kern_return_t
ppl_allow_jit_region(__unused pmap_t pmap)290 ppl_allow_jit_region(
291 __unused pmap_t pmap)
292 {
293 /* PPL does not support this API */
294 return KERN_NOT_SUPPORTED;
295 }
296
297 kern_return_t
ppl_associate_jit_region(pmap_t pmap,const vm_address_t region_addr,const vm_size_t region_size)298 ppl_associate_jit_region(
299 pmap_t pmap,
300 const vm_address_t region_addr,
301 const vm_size_t region_size)
302 {
303 return pmap_cs_associate(
304 pmap,
305 PMAP_CS_ASSOCIATE_JIT,
306 region_addr,
307 region_size,
308 0);
309 }
310
311 kern_return_t
ppl_associate_debug_region(pmap_t pmap,const vm_address_t region_addr,const vm_size_t region_size)312 ppl_associate_debug_region(
313 pmap_t pmap,
314 const vm_address_t region_addr,
315 const vm_size_t region_size)
316 {
317 return pmap_cs_associate(
318 pmap,
319 PMAP_CS_ASSOCIATE_COW,
320 region_addr,
321 region_size,
322 0);
323 }
324
325 kern_return_t
ppl_address_space_debugged(pmap_t pmap)326 ppl_address_space_debugged(
327 pmap_t pmap)
328 {
329 /*
330 * ppl_associate_debug_region is a fairly idempotent function which simply
331 * checks if an address space is already debugged or not and returns a value
332 * based on that. The actual memory region is not inserted into the address
333 * space, so we can pass whatever in this case. The only caveat here though
334 * is that the memory region needs to be page-aligned and cannot be NULL.
335 */
336 return ppl_associate_debug_region(pmap, PAGE_SIZE, PAGE_SIZE);
337 }
338
339 kern_return_t
ppl_allow_invalid_code(pmap_t pmap)340 ppl_allow_invalid_code(
341 pmap_t pmap)
342 {
343 return pmap_cs_allow_invalid(pmap);
344 }
345
346 kern_return_t
ppl_get_trust_level_kdp(pmap_t pmap,uint32_t * trust_level)347 ppl_get_trust_level_kdp(
348 pmap_t pmap,
349 uint32_t *trust_level)
350 {
351 return pmap_get_trust_level_kdp(pmap, trust_level);
352 }
353
354 kern_return_t
ppl_address_space_exempt(const pmap_t pmap)355 ppl_address_space_exempt(
356 const pmap_t pmap)
357 {
358 if (pmap_performs_stage2_translations(pmap) == true) {
359 return KERN_SUCCESS;
360 }
361
362 return KERN_DENIED;
363 }
364
365 kern_return_t
ppl_fork_prepare(pmap_t old_pmap,pmap_t new_pmap)366 ppl_fork_prepare(
367 pmap_t old_pmap,
368 pmap_t new_pmap)
369 {
370 return pmap_cs_fork_prepare(old_pmap, new_pmap);
371 }
372
373 kern_return_t
ppl_acquire_signing_identifier(const void * sig_obj,const char ** signing_id)374 ppl_acquire_signing_identifier(
375 const void *sig_obj,
376 const char **signing_id)
377 {
378 const pmap_cs_code_directory_t *cd_entry = sig_obj;
379
380 /* If we reach here, the identifier must have been setup */
381 assert(cd_entry->identifier != NULL);
382
383 if (signing_id) {
384 *signing_id = cd_entry->identifier;
385 }
386
387 return KERN_SUCCESS;
388 }
389
390 #pragma mark Entitlements
391
392 kern_return_t
ppl_associate_kernel_entitlements(void * sig_obj,const void * kernel_entitlements)393 ppl_associate_kernel_entitlements(
394 void *sig_obj,
395 const void *kernel_entitlements)
396 {
397 pmap_cs_code_directory_t *cd_entry = sig_obj;
398 return pmap_associate_kernel_entitlements(cd_entry, kernel_entitlements);
399 }
400
401 kern_return_t
ppl_resolve_kernel_entitlements(pmap_t pmap,const void ** kernel_entitlements)402 ppl_resolve_kernel_entitlements(
403 pmap_t pmap,
404 const void **kernel_entitlements)
405 {
406 kern_return_t ret = KERN_DENIED;
407 const void *entitlements = NULL;
408
409 ret = pmap_resolve_kernel_entitlements(pmap, &entitlements);
410 if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
411 *kernel_entitlements = entitlements;
412 }
413
414 return ret;
415 }
416
417 kern_return_t
ppl_accelerate_entitlements(void * sig_obj,CEQueryContext_t * ce_ctx)418 ppl_accelerate_entitlements(
419 void *sig_obj,
420 CEQueryContext_t *ce_ctx)
421 {
422 pmap_cs_code_directory_t *cd_entry = sig_obj;
423 kern_return_t ret = KERN_DENIED;
424
425 ret = pmap_accelerate_entitlements(cd_entry);
426
427 /*
428 * We only ever get KERN_ABORTED when we cannot accelerate the entitlements
429 * because it would consume too much memory. In this case, we still want to
430 * return the ce_ctx since we don't want the system to fall-back to non-PPL
431 * locked down memory, so we switch this to a success case.
432 */
433 if (ret == KERN_ABORTED) {
434 ret = KERN_SUCCESS;
435 }
436
437 /* Return the accelerated context to the caller */
438 if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
439 *ce_ctx = cd_entry->ce_ctx;
440 }
441
442 return ret;
443 }
444
445 #pragma mark Image4
446
447 void*
ppl_image4_storage_data(size_t * allocated_size)448 ppl_image4_storage_data(
449 size_t *allocated_size)
450 {
451 return pmap_image4_pmap_data(allocated_size);
452 }
453
454 void
ppl_image4_set_nonce(const img4_nonce_domain_index_t ndi,const img4_nonce_t * nonce)455 ppl_image4_set_nonce(
456 const img4_nonce_domain_index_t ndi,
457 const img4_nonce_t *nonce)
458 {
459 return pmap_image4_set_nonce(ndi, nonce);
460 }
461
462 void
ppl_image4_roll_nonce(const img4_nonce_domain_index_t ndi)463 ppl_image4_roll_nonce(
464 const img4_nonce_domain_index_t ndi)
465 {
466 return pmap_image4_roll_nonce(ndi);
467 }
468
469 errno_t
ppl_image4_copy_nonce(const img4_nonce_domain_index_t ndi,img4_nonce_t * nonce_out)470 ppl_image4_copy_nonce(
471 const img4_nonce_domain_index_t ndi,
472 img4_nonce_t *nonce_out)
473 {
474 return pmap_image4_copy_nonce(ndi, nonce_out);
475 }
476
477 errno_t
ppl_image4_execute_object(img4_runtime_object_spec_index_t obj_spec_index,const img4_buff_t * payload,const img4_buff_t * manifest)478 ppl_image4_execute_object(
479 img4_runtime_object_spec_index_t obj_spec_index,
480 const img4_buff_t *payload,
481 const img4_buff_t *manifest)
482 {
483 errno_t err = EINVAL;
484 kern_return_t kr = KERN_DENIED;
485 img4_buff_t payload_aligned = IMG4_BUFF_INIT;
486 img4_buff_t manifest_aligned = IMG4_BUFF_INIT;
487 vm_address_t payload_addr = 0;
488 vm_size_t payload_len_aligned = 0;
489 vm_address_t manifest_addr = 0;
490 vm_size_t manifest_len_aligned = 0;
491
492 if (payload == NULL) {
493 printf("invalid object execution request: no payload\n");
494 goto out;
495 }
496
497 /*
498 * The PPL will attempt to lockdown both the payload and the manifest before executing
499 * the object. In order for that to happen, both the artifacts need to be page-aligned.
500 */
501 payload_len_aligned = round_page(payload->i4b_len);
502 if (manifest != NULL) {
503 manifest_len_aligned = round_page(manifest->i4b_len);
504 }
505
506 kr = kmem_alloc(
507 kernel_map,
508 &payload_addr,
509 payload_len_aligned,
510 KMA_KOBJECT,
511 VM_KERN_MEMORY_SECURITY);
512
513 if (kr != KERN_SUCCESS) {
514 printf("unable to allocate memory for image4 payload: %d\n", kr);
515 err = ENOMEM;
516 goto out;
517 }
518
519 /* Copy in the payload */
520 memcpy((uint8_t*)payload_addr, payload->i4b_bytes, payload->i4b_len);
521
522 /* Construct the aligned payload buffer */
523 payload_aligned.i4b_bytes = (uint8_t*)payload_addr;
524 payload_aligned.i4b_len = payload->i4b_len;
525
526 if (manifest != NULL) {
527 kr = kmem_alloc(
528 kernel_map,
529 &manifest_addr,
530 manifest_len_aligned,
531 KMA_KOBJECT,
532 VM_KERN_MEMORY_SECURITY);
533
534 if (kr != KERN_SUCCESS) {
535 printf("unable to allocate memory for image4 manifest: %d\n", kr);
536 err = ENOMEM;
537 goto out;
538 }
539
540 /* Construct the aligned manifest buffer */
541 manifest_aligned.i4b_bytes = (uint8_t*)manifest_addr;
542 manifest_aligned.i4b_len = manifest->i4b_len;
543
544 /* Copy in the manifest */
545 memcpy((uint8_t*)manifest_addr, manifest->i4b_bytes, manifest->i4b_len);
546 }
547
548 err = pmap_image4_execute_object(obj_spec_index, &payload_aligned, &manifest_aligned);
549 if (err != 0) {
550 printf("unable to execute image4 object: %d\n", err);
551 goto out;
552 }
553
554 out:
555 /* We always free the manifest as it isn't required anymore */
556 if (manifest_addr != 0) {
557 kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
558 manifest_addr = 0;
559 manifest_len_aligned = 0;
560 }
561
562 /* If we encountered an error -- free the allocated payload */
563 if ((err != 0) && (payload_addr != 0)) {
564 kmem_free(kernel_map, payload_addr, payload_len_aligned);
565 payload_addr = 0;
566 payload_len_aligned = 0;
567 }
568
569 return err;
570 }
571
572 errno_t
ppl_image4_copy_object(img4_runtime_object_spec_index_t obj_spec_index,vm_address_t object_out,size_t * object_length)573 ppl_image4_copy_object(
574 img4_runtime_object_spec_index_t obj_spec_index,
575 vm_address_t object_out,
576 size_t *object_length)
577 {
578 errno_t err = EINVAL;
579 kern_return_t kr = KERN_DENIED;
580 vm_address_t object_addr = 0;
581 vm_size_t object_len_aligned = 0;
582
583 if (object_out == 0) {
584 printf("invalid object copy request: no object input buffer\n");
585 goto out;
586 } else if (object_length == NULL) {
587 printf("invalid object copy request: no object input length\n");
588 goto out;
589 }
590
591 /*
592 * The PPL will attempt to pin the input buffer in order to ensure that the kernel
593 * didn't pass in PPL-owned buffers. The PPL cannot pin the same page more than once,
594 * and attempting to do so will panic the system. Hence, we allocate fresh pages for
595 * for the PPL to pin.
596 *
597 * We can send in the address for the length pointer since that is allocated on the
598 * stack, so the PPL can pin our stack for the duration of the call as no other
599 * thread can be using our stack, meaning the PPL will never attempt to double-pin
600 * the page.
601 */
602 object_len_aligned = round_page(*object_length);
603
604 kr = kmem_alloc(
605 kernel_map,
606 &object_addr,
607 object_len_aligned,
608 KMA_KOBJECT,
609 VM_KERN_MEMORY_SECURITY);
610
611 if (kr != KERN_SUCCESS) {
612 printf("unable to allocate memory for image4 object: %d\n", kr);
613 err = ENOMEM;
614 goto out;
615 }
616
617 err = pmap_image4_copy_object(obj_spec_index, object_addr, object_length);
618 if (err != 0) {
619 printf("unable to copy image4 object: %d\n", err);
620 goto out;
621 }
622
623 /* Copy the data back into the caller passed buffer */
624 memcpy((void*)object_out, (void*)object_addr, *object_length);
625
626 out:
627 /* We don't ever need to keep around our page-aligned buffer */
628 if (object_addr != 0) {
629 kmem_free(kernel_map, object_addr, object_len_aligned);
630 object_addr = 0;
631 object_len_aligned = 0;
632 }
633
634 return err;
635 }
636
637 const void*
ppl_image4_get_monitor_exports(void)638 ppl_image4_get_monitor_exports(void)
639 {
640 /*
641 * AppleImage4 can query the PMAP_CS runtime on its own since the PMAP_CS
642 * runtime is compiled within the kernel extension itself. As a result, we
643 * never expect this KPI to be called when the system uses the PPL monitor.
644 */
645
646 printf("explicit monitor-exports-get not required for the PPL\n");
647 return NULL;
648 }
649
650 errno_t
ppl_image4_set_release_type(__unused const char * release_type)651 ppl_image4_set_release_type(
652 __unused const char *release_type)
653 {
654 /*
655 * AppleImage4 stores the release type in the CTRR protected memory region
656 * of its kernel extension. This is accessible by the PMAP_CS runtime as the
657 * runtime is compiled alongside the kernel extension. As a result, we never
658 * expect this KPI to be called when the system uses the PPL monitor.
659 */
660
661 printf("explicit release-type-set set not required for the PPL\n");
662 return ENOTSUP;
663 }
664
665 errno_t
ppl_image4_set_bnch_shadow(__unused const img4_nonce_domain_index_t ndi)666 ppl_image4_set_bnch_shadow(
667 __unused const img4_nonce_domain_index_t ndi)
668 {
669 /*
670 * AppleImage4 stores the BNCH shadow in the CTRR protected memory region
671 * of its kernel extension. This is accessible by the PMAP_CS runtime as the
672 * runtime is compiled alongside the kernel extension. As a result, we never
673 * expect this KPI to be called when the system uses the PPL monitor.
674 */
675
676 printf("explicit BNCH-shadow-set not required for the PPL\n");
677 return ENOTSUP;
678 }
679
680 #endif /* PMAP_CS_PPL_MONITOR */
681