/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * Secure kernel coredump support.
 *
 * The actual coredump is a physical memory footprint of the secure kernel's
 * memory. The whole Mach-O is written using PA == VA mappings for its
 * segments. cL4's scripted process plugin is required to reconstruct the
 * VA address spaces.
 */

#include <kdp/processor_core.h>
#include <kdp/kdp_core.h>
#include <kdp/core_notes.h>
#include <kdp/sk_core.h>

#include <machine/machine_routines.h>
#include <pexpert/boot.h>
#include <arm64/sptm/sptm.h>
#include <arm64/proc_reg.h>
#include <vm/pmap.h>

#if EXCLAVES_COREDUMP

#pragma mark secure coredump data structures


/* Secure coredump reference constant type. */
struct secure_core_context {
	uuid_t   *scc_uuid;     /* UUID to be added to the coredump. */
	uint64_t scc_segments;  /* Total number of segments. */
	uint64_t scc_total;     /* Total number of bytes. */
};

/*
 * cL4's debug signpost structure, required to identify the UUID of a cL4.
 *
 * Keep in sync with cL4's debug signpost.
 * From: cL4/kernel/src/plat/common/lib/debug/signpost/signpost.h
 */

/* Older structure magic/version. */
#define ASTRIS_UUID_MAGIC   'diuu'
#define ASTRIS_UUID_VERSION 5

/*
 * Both old and new structures are identical up to the uuid field.
 * XNU does not need to read anything else, so the structure below
 * defines only the common fields up to the uuid itself.
 */
struct astris_uuid {
	uint32_t magic;
	uint32_t version;
	uint32_t _reserved_0[2];
	uuid_t   uuid;
	uint32_t _reserved_1[4];
} __attribute__((packed));

/* Current structure magic/version. */
#define DEBUG_SIGNPOST_MAGIC   'cL4D'
#define DEBUG_SIGNPOST_VERSION 0

struct debug_signpost {
	uint32_t magic;
	uint32_t version;
	uuid_t   uuid;
	/* remaining fields are not interesting */
} __attribute__((packed));

/*
 * cL4's debug kernel header.
 *
 * Keep in sync with cL4's dbg_kernel_header.
 * From: kernel/src/plat/common/lib/dbgreg/internal.h
 */

/* debug registry secure kernel record header version */
#define DBGREG_KERNEL_HEADER_VERSION 2

/* debug registry secure kernel record header */
struct dbg_kernel_header {
	uint32_t version;   /* header version */
	uint32_t reserved;  /* reserved */
	uint64_t tcr;       /* kernel tcr_el1 register value */
	uint64_t ttbr1;     /* kernel ttbr1_el1 register value */
	uint64_t vbar;      /* kernel vbar_el1 register value */
};

static SECURITY_READ_ONLY_LATE(struct dbg_kernel_header *sk_dbg_header) = NULL;

/*
 * Artificial segment that will hold a copy of the 'seckern' LC_NOTE. This copy
 * can be removed once LLDB gives scripted processes access to LC_NOTEs.
 *
 * The actual VA has to be kept in sync with cL4's scripted process plug-in.
 *
 * Removal tracked in rdar://116107495
 */
static const uint64_t sk_dbg_header_seg_va = 0xFFFFFFFFFFFFC000;

/* coredumping mode */
__enum_closed_decl(secure_coredump_mode_t, unsigned int, {
	SC_MODE_DISABLED,  /* Coredump support disabled. */
	SC_MODE_CDBG       /* Use consistent debug entries. */
});

/* Referenced from kern_sysctl.c */
SECURITY_READ_ONLY_LATE(secure_coredump_mode_t sc_dump_mode) = SC_MODE_DISABLED;


#pragma mark Coredump configuration (consistent debug)


/*
 * A dev-fused device should have a SECKERN entry in its consistent debug
 * records; it is used to find the location of the cL4 debug header.
 *
 * The consistent debug entry holds a pointer into cL4's address space.
 * The contents of the debug header cannot be accessed until the SPTM grants
 * access to secure kernel memory.
 */

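/* 64-bit consistent debug record ID assembled from the ASCII bytes 'SECKERN'. */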
#define kDbgIdSecKernInfo (DEBUG_RECORD_ID_LONG('S','E','C','K','E','R','N', 0))

static kern_return_t
sc_init_cdbg(void)
{
	uint64_t sk_debug_phys, sk_debug_len;

	if (!PE_consistent_debug_enabled()) {
		printf("secure_core: Consistent debug disabled.\n");
		return KERN_FAILURE;
	}

	if (!PE_consistent_debug_lookup_entry(kDbgIdSecKernInfo, &sk_debug_phys, &sk_debug_len)) {
		printf("secure_core: secure kernel entry missing in consistent debug.\n");
		return KERN_FAILURE;
	}

	/* Configure consistent debug mode. */
	sk_dbg_header = (struct dbg_kernel_header *)phystokv(sk_debug_phys);
	sc_dump_mode = SC_MODE_CDBG;

	return KERN_SUCCESS;
}


#pragma mark Address translation support


/*
 * All structures required for the dump live in cL4's VA space. Translation
 * from cL4 VAs to XNU VAs is required so the coredumper can access the data
 * from the XNU side.
 */


/* Taken from pmap code - configuration for a 4-level page table walk. */
static const struct page_level_config {
	const uint64_t size;
	const uint64_t offmask;
	const uint64_t shift;
	const uint64_t index_mask;
	const uint64_t valid_mask;
	const uint64_t type_mask;
	const uint64_t type_block;
} page_config[] = {
	[0] = {
		.size = ARM_16K_TT_L0_SIZE,
		.offmask = ARM_16K_TT_L0_OFFMASK,
		.shift = ARM_16K_TT_L0_SHIFT,
		.index_mask = ARM_16K_TT_L0_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[1] = {
		.size = ARM_16K_TT_L1_SIZE,
		.offmask = ARM_16K_TT_L1_OFFMASK,
		.shift = ARM_16K_TT_L1_SHIFT,
		.index_mask = ARM_16K_TT_L1_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[2] = {
		.size = ARM_16K_TT_L2_SIZE,
		.offmask = ARM_16K_TT_L2_OFFMASK,
		.shift = ARM_16K_TT_L2_SHIFT,
		.index_mask = ARM_16K_TT_L2_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[3] = {
		.size = ARM_16K_TT_L3_SIZE,
		.offmask = ARM_16K_TT_L3_OFFMASK,
		.shift = ARM_16K_TT_L3_SHIFT,
		.index_mask = ARM_16K_TT_L3_INDEX_MASK,
		.valid_mask = ARM_PTE_TYPE_VALID,
		.type_mask = ARM_PTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_L3BLOCK
	}
};

/* Return kernel's VA size from TCR. */
static inline uint64_t
sc_va_size_bits(uint64_t tcr)
{
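	/* The VA width is 64 - TCR_EL1.T1SZ; e.g. T1SZ == 25 gives a 39-bit kernel VA space. */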
	return 64 - ((tcr >> TCR_T1SZ_SHIFT) & TCR_TSZ_MASK);
}

/* Return page size in bits based on TCR. */
static inline uint64_t
sc_va_page_size_bits(uint64_t tcr)
{
	const uint64_t tg1 = tcr & (TCR_TG1_GRANULE_MASK << TCR_TG1_GRANULE_SHIFT);

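	/* page_config above only describes the 16K granule, so any other TG1 value is rejected. */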
	/* Only 16K pages are supported. */
	if (tg1 == TCR_TG1_GRANULE_16KB) {
		return 14;
	}

	return 0;
}

/* Page table walk configuration. */
static uint64_t sc_vaddr_mask = 0;
static int sc_page_levels = 0;

static kern_return_t
sc_boostrap_va(struct dbg_kernel_header *hdrp)
{
	/* validate and set VA bit mask */
	const uint64_t va_size_bits = sc_va_size_bits(hdrp->tcr);
	if (va_size_bits == 0) {
		kern_coredump_log(NULL, "secure_core: Invalid VA bit size: 0x%llx", hdrp->tcr);
		return KERN_FAILURE;
	}

	sc_vaddr_mask = (1UL << va_size_bits) - 1;

	/* validate and set page size / levels */
	const uint64_t page_size_bits = sc_va_page_size_bits(hdrp->tcr);
	if (page_size_bits == 0) {
		kern_coredump_log(NULL, "secure_core: Invalid page size bits: 0x%llx", hdrp->tcr);
		return KERN_FAILURE;
	}

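	/*
	 * Each level of the walk resolves (page_size_bits - 3) VA bits, since a
	 * table page holds 2^(page_size_bits - 3) 8-byte entries. The expression
	 * below is a ceiling division; e.g. with a 16K granule (14 page bits) and
	 * a 39-bit VA space: ceil((39 - 14) / 11) = 3 levels, so the walk starts
	 * at L1.
	 */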
	sc_page_levels = (int)(((va_size_bits - page_size_bits) + ((page_size_bits - 3) - 1)) / (page_size_bits - 3));
	return KERN_SUCCESS;
}

/*
 * Converts a cL4 kernel VA to an XNU VA.
 *
 * The translation starts with cL4's TTBR1 and maps a cL4 virtual address to a
 * physical address. The physical address is then mapped back to XNU's PAPT
 * virtual address.
 *
 * Note: panics if SPTM has not unlocked SK_DOMAIN memory access.
 */
static vm_map_address_t
sc_cL4_kvtov(vm_map_address_t vaddr)
{
	tt_entry_t *ttp = (tt_entry_t *)phystokv(sk_dbg_header->ttbr1 & ARM_TTE_PA_MASK);
	tt_entry_t *ttep = NULL;
	tt_entry_t tte = ARM_TTE_EMPTY;
	pmap_paddr_t paddr = 0;

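	/* Drop the sign-extension bits of the TTBR1 address so the per-level index masks apply. */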
	vaddr = vaddr & sc_vaddr_mask;

	for (int cur_level = 4 - sc_page_levels; cur_level <= 3; cur_level++) {
		const uint64_t idx = vaddr & page_config[cur_level].index_mask;
		const uint64_t valid_mask = page_config[cur_level].valid_mask;
		const uint64_t type_mask = page_config[cur_level].type_mask;
		const uint64_t type_block = page_config[cur_level].type_block;
		const uint64_t offmask = page_config[cur_level].offmask;

		/* Find the TTE index for the current level. */
		ttep = &ttp[idx >> page_config[cur_level].shift];
		tte = *ttep;

		/* Check that we have a valid TTE. */
		if ((tte & valid_mask) != valid_mask) {
			return 0;
		}

		/* Detect a leaf entry/block address. */
		if ((tte & type_mask) == type_block) {
			paddr = ((tte & ARM_TTE_PA_MASK & ~offmask) | (vaddr & offmask));
			break;
		}

		/* Find the next TTE through the PAPT. */
		ttp = (tt_entry_t *)phystokv(tte & ARM_TTE_TABLE_MASK);
	}

	/* Translate the physical address back to an XNU VA. */
	return phystokv(paddr);
}

/*
 * Locates the cL4 debug signpost per the agreed algorithm:
 * 1. Read the stepping stone at VBAR + 0x800.
 * 2. Construct the final VA as VBAR + stepping stone.
 *
 * Note: Requires access to SK_DOMAIN pages to avoid a panic.
 */
static uuid_t *
sc_find_uuid_cdbg(void)
{
	uuid_t *uuid = NULL;

	/* Get the PAPT VA of the stepping stone. */
	vm_map_address_t vbar = sc_cL4_kvtov(sk_dbg_header->vbar);
	if (vbar == 0) {
		kern_coredump_log(NULL, "secure_core: Can't translate VBAR address\n");
		return NULL;
	}

	/* Read the signed stepping-stone offset and construct the final debug signpost address. */
	uint64_t offs = *(int64_t *)(vbar + 0x800);

	const vm_map_address_t signpostva = sc_cL4_kvtov(sk_dbg_header->vbar + offs);
	if (signpostva == 0) {
		kern_coredump_log(NULL, "secure_core: Can't translate signpost address\n");
		return NULL;
	}

	/* Determine which structure to use based on magic and version. */
	const uint32_t magic = *(uint32_t *)signpostva;

	switch (magic) {
	case ASTRIS_UUID_MAGIC: {
		struct astris_uuid *signpost = (struct astris_uuid *)signpostva;

		if (signpost->version != ASTRIS_UUID_VERSION) {
			kern_coredump_log(NULL, "secure_core: unsupported astris signpost version 0x%x\n", signpost->version);
			break;
		}

		uuid = &signpost->uuid;
		break;
	}
	case DEBUG_SIGNPOST_MAGIC: {
		struct debug_signpost *signpost = (struct debug_signpost *)signpostva;

		if (signpost->version != DEBUG_SIGNPOST_VERSION) {
			kern_coredump_log(NULL, "secure_core: unsupported debug signpost version 0x%x\n", signpost->version);
			break;
		}

		uuid = &signpost->uuid;
		break;
	}
	default:
		kern_coredump_log(NULL, "secure_core: unknown signpost magic 0x%x\n", magic);
	}

	return uuid;
}


#pragma mark secure coredump memory dump


/* Pages to be walked. */
extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;

typedef kern_return_t (*papt_walk_callback)(pmap_paddr_t start, pmap_paddr_t end, void *context);

/*
 * Returns whether a PAPT page belongs to SK_DOMAIN.
 *
 * I/O pages are not collected as part of the secure coredump.
 */
static inline bool
is_sk_type(int type)
{
	return type == SK_DEFAULT || type == SK_SHARED_RO || type == SK_SHARED_RW;
}

/*
 * Walks XNU's PAPT and finds all physical pages from SK_DOMAIN.
 * Contiguous ranges of physical pages are reported as a single segment.
 */
static kern_return_t
papt_walk(papt_walk_callback cb, void *context)
{
	pmap_paddr_t seg_start = 0;
	pmap_paddr_t seg_end = 0;
	pmap_paddr_t last_paddr = 0;
	kern_return_t kr = KERN_SUCCESS;

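	/* Scan the physical aperture a page at a time; kvtophys() recovers the PA backing each aperture VA. */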
	for (vm_map_address_t vaddr = physmap_base; vaddr < physmap_end; vaddr += PAGE_SIZE_64) {
		pmap_paddr_t paddr = kvtophys(vaddr);

		/* Skip pages outside the secure domain. */
		if (!is_sk_type(sptm_get_frame_type(paddr))) {
			continue;
		}

		/* Open a new segment if we don't have one already. */
		if (seg_start == 0) {
			seg_start = paddr;
			seg_end = paddr + PAGE_SIZE_64;
			last_paddr = paddr;
			continue;
		}

		/* Extend the currently open segment if the PA is contiguous. */
		if (paddr == last_paddr + PAGE_SIZE_64) {
			seg_end = paddr + PAGE_SIZE_64;
			last_paddr = paddr;
			continue;
		}

		/* Close the segment and send it out to the callback. */
		kr = cb(seg_start, seg_end, context);
		if (kr != KERN_SUCCESS) {
			kern_coredump_log(NULL, "secure_core: PAPT walk callback failed with %d\n", kr);
			return kr;
		}

		/* Open a new segment. */
		seg_start = paddr;
		seg_end = paddr + PAGE_SIZE_64;
		last_paddr = paddr;
	}

	/* report the last segment */
	if (seg_start != 0) {
		kr = cb(seg_start, seg_end, context);
		if (kr != KERN_SUCCESS) {
			kern_coredump_log(NULL, "secure_core: PAPT walk callback failed with %d\n", kr);
		}
	}

	return kr;
}


#pragma mark secure coredump helper callbacks


/*
 * It is not possible to allocate memory on the panic path, so this code has
 * to scan cL4's memory multiple times. First, a preflight pass identifies how
 * many segments and how many bytes in total are about to be dumped. The
 * remaining passes perform the actual data extraction.
 *
 * Getting totals ahead of the actual dump is required not only to show
 * progress but also to periodically prolong the panic watchdog.
 */
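
/*
 * Each pass is a separate papt_walk(): secure_summary counts segments and
 * bytes, secure_seg_desc reports the segment address ranges, and
 * secure_seg_data hands out the segment contents.
 */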


static kern_return_t
secure_summary(pmap_paddr_t start, pmap_paddr_t end, void *refcon)
{
	struct secure_core_context *scc = refcon;

	scc->scc_segments++;
	scc->scc_total += (end - start);

	return KERN_SUCCESS;
}

static kern_return_t
sk_dump_init(void *refcon, void *context)
{
	/* Core dump disabled for some reason. */
	if (sc_dump_mode == SC_MODE_DISABLED) {
		kern_coredump_log(context, "secure_core: Disabled\n");
		return KERN_NODE_DOWN;
	}

	if (sk_dbg_header == NULL) {
		kern_coredump_log(context, "secure_core: No debug header\n");
		return KERN_NODE_DOWN;
	}

	/* Unlock access to secure domain pages. */
	sptm_map_sk_domain();

	/* Bootstrap secure kernel page table translation. */
	if (sc_boostrap_va(sk_dbg_header) != KERN_SUCCESS) {
		kern_coredump_log(context, "secure_core: Invalid debug header contents.\n");
		return KERN_NODE_DOWN;
	}

	/* validate debug header */
	if (sk_dbg_header->version != DBGREG_KERNEL_HEADER_VERSION) {
		kern_coredump_log(context, "secure_core: Debug header version (%d) mismatch\n",
		    sk_dbg_header->version);
		return KERN_NODE_DOWN;
	}

	/* validate debug signpost and discover UUID. */
	struct secure_core_context *scc = (struct secure_core_context *)refcon;
	scc->scc_uuid = sc_find_uuid_cdbg();

	if (scc->scc_uuid == NULL) {
		kern_coredump_log(context, "secure_core: No UUID found\n");
		return KERN_NODE_DOWN;
	}

	return KERN_SUCCESS;
}

static kern_return_t
sk_dump_get_summary(void *refcon, core_save_summary_cb callback, void *context)
{
	kern_return_t ret;

	ret = papt_walk(secure_summary, refcon);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "secure_core: Unable to get summary: %d\n", ret);
		return ret;
	}

	/* Account for the extra segment carrying the debug header for LLDB's scripted process. */
	struct secure_core_context *scc = refcon;
	scc->scc_segments++;
	scc->scc_total += sizeof(struct dbg_kernel_header);

	return callback(scc->scc_segments, scc->scc_total, 0, 0, 0, context);
}

typedef struct secure_segment_desc {
	core_save_segment_descriptions_cb ssd_callback;
	void                              *ssd_context;
} secure_segment_desc_t;

static kern_return_t
secure_seg_desc(pmap_paddr_t start, pmap_paddr_t end, void *context)
{
	secure_segment_desc_t *ssd = context;

	return ssd->ssd_callback(start, end, ssd->ssd_context);
}

static kern_return_t
sk_dump_save_seg_desc(void *refcon __unused, core_save_segment_descriptions_cb callback,
    void *context)
{
	kern_return_t ret;
	secure_segment_desc_t ssd = {
		.ssd_callback = callback,
		.ssd_context = context
	};

	ret = papt_walk(secure_seg_desc, &ssd);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "secure_core: Unable to save segment description: %d\n", ret);
		return ret;
	}

	/* Save cL4's debug header as a special segment. */
	return callback(sk_dbg_header_seg_va, sk_dbg_header_seg_va + sizeof(struct dbg_kernel_header), context);
}


typedef struct secure_segment_data {
	core_save_segment_data_cb ssd_callback;
	void                      *ssd_context;
} secure_segment_data_t;

static kern_return_t
secure_seg_data(pmap_paddr_t start, pmap_paddr_t end, void *context)
{
	secure_segment_data_t *ssd = context;

	return ssd->ssd_callback((void *)phystokv(start), end - start, ssd->ssd_context);
}

static kern_return_t
sk_dump_save_seg_data(void *refcon __unused, core_save_segment_data_cb callback,
    void *context)
{
	kern_return_t ret;
	secure_segment_data_t ssd = {
		.ssd_callback = callback,
		.ssd_context = context
	};

	ret = papt_walk(secure_seg_data, &ssd);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "secure_core: Unable to save segment data: %d\n", ret);
		return ret;
	}

	/* Save the cL4 debug header as the last segment. */
	return callback(sk_dbg_header, sizeof(struct dbg_kernel_header), context);
}

static kern_return_t
sk_dump_save_thread_state(void *refcon __unused, void *buf __unused,
    core_save_thread_state_cb callback __unused, void *context __unused)
{
	/* All threads are parked on the XNU side, so there is no cL4 thread state. */
	return KERN_FAILURE;
}

static kern_return_t
sk_dump_save_sw_vers_detail(void *refcon,
    core_save_sw_vers_detail_cb callback, void *context)
{
	struct secure_core_context *scc = refcon;
	return callback(0, *(scc->scc_uuid), 0, context);
}

/*
 * LC_NOTE from consistent debug.
 *
 * Contains a snapshot of cL4's consistent debug region.
 * The note uses its own versioning to support future extension.
 */

#define SECKERN_DATA_OWNER "seckern"

typedef struct seckern_note {
	uint32_t version;  /* currently 1 */
	struct dbg_kernel_header header;
} __attribute__((packed)) seckern_note_t;

#define SECKERN_VER 1

static kern_return_t
sk_dump_save_note_summary(void *refcon __unused, core_save_note_summary_cb callback, void *context)
{
	return callback(1, sizeof(seckern_note_t), context);
}

static kern_return_t
sk_dump_save_note_desc(void *refcon __unused, core_save_note_descriptions_cb callback, void *context)
{
	return callback(SECKERN_DATA_OWNER, sizeof(seckern_note_t), context);
}

static kern_return_t
sk_dump_save_note_data(void *refcon __unused, core_save_note_data_cb callback, void *context)
{
	seckern_note_t seckern_note = {
		.version = SECKERN_VER,
		.header = *sk_dbg_header
	};

	return callback(&seckern_note, sizeof(seckern_note), context);
}


#pragma mark secure coredump handler registration


/* A static refcon is fine as the secure coredump has only a single instance. */
static struct secure_core_context sc_context;

bool
sk_core_enabled(void)
{
	return SPTMArgs->sk_bootstrapped;
}

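/* Space to set aside for a secure kernel coredump: a fixed 750 MiB estimate when cL4 is bootstrapped. */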
uint64_t
sk_core_size(void)
{
	static const uint64_t one_mb = 1024ULL * 1024ULL;
	return (sk_core_enabled()) ? 750 * one_mb : 0;
}

/*
 * Initialize secure kernel coredump.
 *
 * Registers a coredump helper only if the secure kernel's cL4 is able to
 * provide all required debugging information to XNU:
 *
 * - debug header for TTBR1 page table unwinding
 * - debug signpost for the UUID
 *
 * It is not possible to validate any contents of these structures, as XNU
 * does not have access to cL4's memory at this point. That access will be
 * granted later, from panic context.
 */
void
sk_core_init(void)
{
	kern_coredump_callback_config sk_config = { };
	kern_return_t kr;

	/*
	 * The user can disable the secure kernel coredump with the following
	 * boot-arg: secure_coredump=0
	 */
	unsigned int barg;
	if (PE_parse_boot_argn("secure_coredump", &barg, sizeof(barg)) &&
	    barg == SC_MODE_DISABLED) {
		printf("secure_core: disabled by boot-arg\n");
		return;
	}

	if (sc_init_cdbg() != KERN_SUCCESS) {
		printf("secure_core: not supported\n");
		sc_dump_mode = SC_MODE_DISABLED;
		return;
	}

	if (SPTMArgs->sptm_variant != SPTM_VARIANT_DEVELOPMENT) {
		printf("secure_core: requires development sptm\n");
		sc_dump_mode = SC_MODE_DISABLED;
		return;
	}

	if (!sk_core_enabled()) {
		printf("secure_core: no secure kernel present\n");
		sc_dump_mode = SC_MODE_DISABLED;
		return;
	}

	printf("secure_core: mode = %d\n", sc_dump_mode);

	/* Register the coredump handler. */
	sk_config.kcc_coredump_init = sk_dump_init;
	sk_config.kcc_coredump_get_summary = sk_dump_get_summary;
	sk_config.kcc_coredump_save_segment_descriptions = sk_dump_save_seg_desc;
	sk_config.kcc_coredump_save_segment_data = sk_dump_save_seg_data;
	sk_config.kcc_coredump_save_thread_state = sk_dump_save_thread_state;
	sk_config.kcc_coredump_save_sw_vers_detail = sk_dump_save_sw_vers_detail;
	sk_config.kcc_coredump_save_note_summary = sk_dump_save_note_summary;
	sk_config.kcc_coredump_save_note_descriptions = sk_dump_save_note_desc;
	sk_config.kcc_coredump_save_note_data = sk_dump_save_note_data;

	kr = kern_register_sk_coredump_helper(&sk_config, &sc_context);
	assert3u(kr, ==, KERN_SUCCESS);
}

#else /* EXCLAVES_COREDUMP */

bool
sk_core_enabled(void)
{
	return false;
}

void
sk_core_init(void)
{
}

uint64_t
sk_core_size(void)
{
	return 0;
}

#endif /* EXCLAVES_COREDUMP */
