1 /*
2 * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * The main orchestrator for kernel (and co-processor) coredumps. Here's a very simplistic view of
31 * the flow:
32 *
33 * At kernel initialization time (kdp_core_init):
34 * ----------------------------------------------
35 *
36 * - kdp_core_init() takes care of allocating all necessary data structures and initializes the
37 * coredump output stages
38 *
39 * At coredump time (do_kern_dump):
40 * --------------------------------
41 *
42 * - Depending on the coredump variant, we chain the necessary output stages together in chain_output_stages()
43 * - [Disk only] We initialize the corefile header
44 * - [Disk only] We stream the stackshot out through the output stages and update the corefile header
45 * - We perform the kernel coredump, streaming it out through the output stages
46 * - [Disk only] We update the corefile header
47 * - [Disk only] We perform the co-processor coredumps (driven by kern_do_coredump), streaming each out
48 * through the output stages and updating the corefile header.
49 * - [Disk only] We save the coredump log to the corefile
50 */
51
52 #include <mach/kern_return.h>
53 #include <mach/vm_types.h>
54 #include <kdp/core_exclude.h>
55 #include <kdp/kdp_core.h>
56
57 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
58
59 #include <mach/mach_types.h>
60 #include <mach/vm_attributes.h>
61 #include <mach/vm_param.h>
62 #include <mach/vm_map.h>
63 #include <vm/vm_protos.h>
64 #include <vm/vm_kern.h>
65 #include <vm/vm_map.h>
66 #include <machine/cpu_capabilities.h>
67 #include <libsa/types.h>
68 #include <libkern/kernel_mach_header.h>
69 #include <kern/locks.h>
70 #include <kdp/kdp_internal.h>
71 #include <kdp/output_stages/output_stages.h>
72 #include <kdp/processor_core.h>
73 #include <IOKit/IOTypes.h>
74 #include <IOKit/IOBSD.h>
75 #include <sys/errno.h>
76 #include <sys/msgbuf.h>
77 #include <san/kasan.h>
78 #include <kern/debug.h>
79 #include <pexpert/pexpert.h>
80 #include <os/atomic_private.h>
81
82 #if defined(__x86_64__)
83 #include <i386/pmap_internal.h>
84 #include <kdp/ml/i386/kdp_x86_common.h>
85 #include <kern/debug.h>
86 #endif /* defined(__x86_64__) */
87
88 kern_return_t kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context);
89 kern_return_t kdp_core_polled_io_polled_file_unavailable(void);
90
91 typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
92 vm_map_offset_t end,
93 void *context);
94
95 static kern_return_t kern_dump_init(void *refcon, void *context);
96 static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
97 static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
98 static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
99 static int kern_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context);
100 static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);
101
102 static int
103 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
104 vm_map_offset_t end,
105 void *context);
106 static int
107 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
108 vm_map_offset_t end,
109 void *context);
110
111 static int
112 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
113 vm_map_offset_t end,
114 void *context);
115
116 static struct kdp_output_stage disk_output_stage = {};
117 static struct kdp_output_stage lz4_output_stage = {};
118 static struct kdp_output_stage zlib_output_stage = {};
119 static struct kdp_output_stage buffer_output_stage = {};
120 static struct kdp_output_stage net_output_stage = {};
121 static struct kdp_output_stage progress_notify_output_stage = {};
122 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
123 static struct kdp_output_stage aea_output_stage = {};
124 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
125 #if defined(__arm64__)
126 static struct kdp_output_stage shmem_output_stage = {};
127 static struct kdp_output_stage memory_backing_aware_buffer_output_stage = {};
128 #endif /* defined(__arm64__) */
129
130 extern uint32_t kdp_crashdump_pkt_size;
131
132 static boolean_t kern_dump_successful = FALSE;
133
134 static const size_t kdp_core_header_size = sizeof(struct mach_core_fileheader_v2) + (KERN_COREDUMP_MAX_CORES * sizeof(struct mach_core_details_v2));
135 static struct mach_core_fileheader_v2 *kdp_core_header = NULL;
136
137 static lck_grp_t *kdp_core_initialization_lock_group = NULL;
138 static lck_mtx_t *kdp_core_disk_stage_lock = NULL;
139 static bool kdp_core_is_initializing_disk_stage = false;
140
141 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
142 static const size_t PUBLIC_KEY_RESERVED_LENGTH = roundup(4096, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
143 static void *kdp_core_public_key = NULL;
144 static lck_mtx_t *kdp_core_encryption_stage_lock = NULL;
145 static bool kdp_core_is_initializing_encryption_stage = false;
146
147 static bool kern_dump_should_enforce_encryption(void);
148 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
149
150 static lck_mtx_t *kdp_core_lz4_stage_lock = NULL;
151 static bool kdp_core_is_initializing_lz4_stage = false;
152
153 /*
154 * These variables will be modified by the BSD layer if the root device is
155 * a RAMDisk.
156 */
157 uint64_t kdp_core_ramdisk_addr = 0;
158 uint64_t kdp_core_ramdisk_size = 0;
159
160 #define COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY (1 << 0)
161 #define COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT (1 << 1)
162
163 boolean_t
kdp_has_polled_corefile(void)164 kdp_has_polled_corefile(void)
165 {
166 return NULL != gIOPolledCoreFileVars;
167 }
168
169 kern_return_t
kdp_polled_corefile_error(void)170 kdp_polled_corefile_error(void)
171 {
172 return gIOPolledCoreFileOpenRet;
173 }
174
175 IOPolledCoreFileMode_t
kdp_polled_corefile_mode(void)176 kdp_polled_corefile_mode(void)
177 {
178 return gIOPolledCoreFileMode;
179 }
180
/*
 * Node in the singly-linked list of virtual address ranges that must be
 * omitted from kernel coredumps (managed by kdp_core_exclude_region() /
 * kdp_core_unexclude_region(), guarded by excluded_regions_mtx).
 */
struct kdp_core_excluded_region {
	struct kdp_core_excluded_region *next;
	vm_offset_t addr; /* page-aligned start of the excluded range */
	vm_size_t size;   /* page-aligned length of the excluded range */
};
186
187 static LCK_GRP_DECLARE(excluded_regions_grp, "kdp-exclude-regions");
188 static LCK_MTX_DECLARE(excluded_regions_mtx, &excluded_regions_grp);
189 static struct kdp_core_excluded_region *excluded_regions;
190
191 kern_return_t
kdp_core_exclude_region(vm_offset_t addr,vm_size_t size)192 kdp_core_exclude_region(vm_offset_t addr, vm_size_t size)
193 {
194 struct kdp_core_excluded_region *region;
195
196 if (addr >= addr + size) {
197 panic("%s: cannot exclude region starting at %p with size %zu (zero or overflowing size)",
198 __func__, (void*)addr, (size_t)size);
199 }
200 if (addr != round_page(addr) || size != round_page(size)) {
201 panic("%s: cannot exclude region starting at %p with size %zu (not page aligned)",
202 __func__, (void*)addr, (size_t)size);
203 }
204
205 region = kalloc_type(typeof(*region), Z_WAITOK | Z_NOFAIL);
206 region->addr = addr;
207 region->size = size;
208
209 lck_mtx_lock(&excluded_regions_mtx);
210 region->next = excluded_regions;
211 excluded_regions = region;
212 lck_mtx_unlock(&excluded_regions_mtx);
213
214 return KERN_SUCCESS;
215 }
216
217 kern_return_t
kdp_core_unexclude_region(vm_offset_t addr,vm_size_t size)218 kdp_core_unexclude_region(vm_offset_t addr, vm_size_t size)
219 {
220 struct kdp_core_excluded_region *region;
221 struct kdp_core_excluded_region **fixup = &excluded_regions;
222
223 lck_mtx_lock(&excluded_regions_mtx);
224 for (region = excluded_regions; region; region = region->next) {
225 if (region->addr == addr && region->size == size) {
226 *fixup = region->next;
227 break;
228 }
229 fixup = ®ion->next;
230 }
231 if (!region) {
232 panic("%s: cannot unexclude region starting at %p with size %zu (not currently excluded)",
233 __func__, (void*)addr, (size_t)size);
234 }
235 lck_mtx_unlock(&excluded_regions_mtx);
236
237 // We had exclusive access to the list when we removed the region, and it is no longer
238 // reachable from the list, so it is safe to free.
239 kfree_type(typeof(*region), region);
240
241 return KERN_SUCCESS;
242 }
243
244 static bool
kernel_vaddr_in_excluded_region(vm_offset_t addr,uint64_t * vincr)245 kernel_vaddr_in_excluded_region(vm_offset_t addr, uint64_t *vincr)
246 {
247 struct kdp_core_excluded_region *region;
248
249 // We check this earlier before attempting to dump the kernel, but verify here.
250 assert(!kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx));
251
252 for (region = excluded_regions; region; region = region->next) {
253 if (region->addr <= addr && addr < (region->addr + region->size)) {
254 *vincr = region->size;
255 return true;
256 }
257 }
258
259 return false;
260 }
261
/*
 * Data sink for the coredump layer: pushes 'length' bytes at 'data' into the
 * first output stage of the chain and tracks progress (logged in ~10%
 * increments). A NULL data + zero length pair signals end-of-stream.
 * Once any write fails, kcos_error latches the error and all subsequent
 * calls become no-ops that return the latched state's last error.
 */
kern_return_t
kdp_core_output(void *kdp_core_out_state, uint64_t length, void * data)
{
	kern_return_t err = KERN_SUCCESS;
	uint64_t percent;
	struct kdp_core_out_state *vars = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&vars->kcos_out_stage);

	if (vars->kcos_error == KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
		// panic testing: force the write to fail after X number of writes
		if ((panic_test_case & PANIC_TEST_CASE_COREFILE_IO_ERR) && (--panic_test_action_count == 0)) {
			panic_test_case &= ~PANIC_TEST_CASE_COREFILE_IO_ERR;
			/* -1 wraps to UINT64_MAX, guaranteeing the stage rejects the write */
			length = -1;
		}
#endif

		if ((err = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, length, data)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(kdp_core_output) outproc(KDP_DATA, NULL, 0x%llx, %p) returned 0x%x\n",
			    length, data, err);
			vars->kcos_error = err;
		}
		if (!data && !length) {
			/* end-of-stream marker: report completion */
			kern_coredump_log(NULL, "100..");
		} else {
			vars->kcos_bytes_written += length;
			percent = (vars->kcos_bytes_written * 100) / vars->kcos_totalbytes;
			if ((percent - vars->kcos_lastpercent) >= 10) {
				vars->kcos_lastpercent = percent;
				kern_coredump_log(NULL, "%lld..\n", percent);
			}
		}
	}
	return err;
}
297
298 #if defined(__arm64__)
299 extern pmap_paddr_t avail_start, avail_end;
300 extern struct vm_object pmap_object_store;
301 #endif
302 extern vm_offset_t c_buffers;
303 extern vm_size_t c_buffers_size;
304
305 static bool
kernel_vaddr_in_coredump_stage(const struct kdp_output_stage * stage,uint64_t vaddr,uint64_t * vincr)306 kernel_vaddr_in_coredump_stage(const struct kdp_output_stage *stage, uint64_t vaddr, uint64_t *vincr)
307 {
308 uint64_t start_addr = (uint64_t)stage->kos_data;
309 uint64_t end_addr = start_addr + stage->kos_data_size;
310
311 if (!stage->kos_data) {
312 return false;
313 }
314
315 if (vaddr >= start_addr && vaddr < end_addr) {
316 *vincr = stage->kos_data_size - (vaddr - start_addr);
317 return true;
318 }
319
320 return false;
321 }
322
323 static bool
kernel_vaddr_in_coredump_stages(uint64_t vaddr,uint64_t * vincr)324 kernel_vaddr_in_coredump_stages(uint64_t vaddr, uint64_t *vincr)
325 {
326 if (kernel_vaddr_in_coredump_stage(&disk_output_stage, vaddr, vincr)) {
327 return true;
328 }
329
330 if (kernel_vaddr_in_coredump_stage(&lz4_output_stage, vaddr, vincr)) {
331 return true;
332 }
333
334 if (kernel_vaddr_in_coredump_stage(&zlib_output_stage, vaddr, vincr)) {
335 return true;
336 }
337
338 if (kernel_vaddr_in_coredump_stage(&buffer_output_stage, vaddr, vincr)) {
339 return true;
340 }
341
342 if (kernel_vaddr_in_coredump_stage(&net_output_stage, vaddr, vincr)) {
343 return true;
344 }
345
346 if (kernel_vaddr_in_coredump_stage(&progress_notify_output_stage, vaddr, vincr)) {
347 return true;
348 }
349
350 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
351 if (kernel_vaddr_in_coredump_stage(&aea_output_stage, vaddr, vincr)) {
352 return true;
353 }
354 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
355
356 #if defined(__arm64__)
357 if (kernel_vaddr_in_coredump_stage(&shmem_output_stage, vaddr, vincr)) {
358 return true;
359 }
360 #endif /* defined(__arm64__) */
361
362 #if defined(__arm64__)
363 if (kernel_vaddr_in_coredump_stage(&memory_backing_aware_buffer_output_stage, vaddr, vincr)) {
364 return true;
365 }
366 #endif /* defined(__arm64__) */
367
368 return false;
369 }
370
/*
 * Resolve the physical page backing kernel virtual address 'vaddr', or 0 if
 * the page is absent or deliberately excluded from coredumps (compressor
 * buffers, the coredump stages' own working memory, the RAM disk, and — on
 * arm64 — the physical memory map). *pvincr is always set to the (page-
 * rounded) amount the caller should advance by; excluded ranges advance by
 * their full size in one step. When 'pvphysaddr' is non-NULL the lookup may
 * fault pages in (pmap_find_phys), and *pvphysaddr receives the physmap
 * virtual alias of the page — pages outside the physmap are reported as 0.
 */
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
	ppnum_t ppn = 0;
	uint64_t vincr = PAGE_SIZE_64;

	assert(!(vaddr & PAGE_MASK_64));

	/* VA ranges to exclude */
	if (vaddr == c_buffers) {
		/* compressor data */
		ppn = 0;
		vincr = c_buffers_size;
	} else if (kernel_vaddr_in_coredump_stages(vaddr, &vincr)) {
		/* coredump output stage working memory */
		ppn = 0;
	} else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
		/* RAM disk backing the root device, if any */
		ppn = 0;
		vincr = kdp_core_ramdisk_size;
	} else
#if defined(__arm64__)
	if (vaddr == phystokv(avail_start)) {
		/* physical memory map */
		ppn = 0;
		vincr = (avail_end - avail_start);
	} else
#endif /* defined(__arm64__) */
	{
		/* nofault variant avoids faulting when only presence is queried */
		ppn = (pvphysaddr != NULL ?
		    pmap_find_phys(kernel_pmap, vaddr) :
		    pmap_find_phys_nofault(kernel_pmap, vaddr));
	}

	*pvincr = round_page_64(vincr);

	if (ppn && pvphysaddr) {
		uint64_t phys = ptoa_64(ppn);
		if (physmap_enclosed(phys)) {
			*pvphysaddr = phystokv(phys);
		} else {
			ppn = 0;
		}
	}

	return ppn;
}
417
/*
 * Walk kernel virtual addresses in [start, end) and invoke 'callback' once
 * per maximal run of contiguous, present, dumpable pages. Pages are dropped
 * from runs when they are excluded regions, outside the debug buffer yet
 * invalid per pmap/bootloader accounting, or in an unmarked phys carveout.
 * On arm64 the pmap's page-table pages (reached via the physmap) are walked
 * explicitly from pmap_object_store. Returns KERN_SUCCESS or the first
 * non-success value from 'callback'.
 */
static int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
	IOReturn ret;
	vm_map_offset_t vcurstart, vcur;
	uint64_t vincr = 0;
	vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
	vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
	vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
	vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

	boolean_t lastvavalid;
#if defined(__arm64__)
	/* non-NULL while iterating the pmap's page-table pages */
	vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
	assert(!is_ept_pmap(pmap));
#endif

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return KERN_INVALID_ARGUMENT;
	}

	ret = KERN_SUCCESS;
	lastvavalid = FALSE;
	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
		ppnum_t ppn = 0;

#if defined(__arm64__)
		/* We're at the start of the physmap, so pull out the pagetable pages that
		 * are accessed through that region.*/
		if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
			m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
		}

		if (m != VM_PAGE_NULL) {
			vm_map_offset_t vprev = vcur;
			/* atop(avail_end) is used as a sentinel for "no more pages" */
			ppn = (ppnum_t)atop(avail_end);
			while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
				/* Ignore pages that come from the static region and have already been dumped.*/
				if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
					ppn = VM_PAGE_GET_PHYS_PAGE(m);
					break;
				}
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
			}
			vincr = PAGE_SIZE_64;
			if (ppn == atop(avail_end)) {
				vm_object_unlock(&pmap_object_store);
				m = VM_PAGE_NULL;
				// avail_end is not a valid physical address,
				// so phystokv(avail_end) may not produce the expected result.
				vcur = phystokv(avail_start) + (avail_end - avail_start);
			} else {
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
				vcur = phystokv(ptoa(ppn));
			}
			if (vcur != vprev) {
				/* jumping around physmap: flush the run accumulated so far */
				ret = callback(vcurstart, vprev, context);
				lastvavalid = FALSE;
			}
		}
		if (m == VM_PAGE_NULL) {
			ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
		}
#else /* defined(__arm64__) */
		ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
		if (ppn != 0 && kernel_vaddr_in_excluded_region(vcur, &vincr)) {
			/* excluded region */
			ppn = 0;
		}
		if (ppn != 0) {
			if (((vcur < debug_start) || (vcur >= debug_end))
			    && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
			    // include the macOS panic region if it's mapped
			    && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
			    ) {
				/* not something we want */
				ppn = 0;
			}
			/* include the phys carveout only if explictly marked */
			if (debug_is_in_phys_carveout(vcur) &&
			    !debug_can_coredump_phys_carveout()) {
				ppn = 0;
			}
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

#if defined(__x86_64__)
			/* Try to skip by 2MB if possible */
			if ((vcur & PDMASK) == 0) {
				pd_entry_t *pde;
				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vincr = NBPD;
					}
				}
			}
#endif /* defined(__x86_64__) */
		}
		vcur += vincr;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}

#if KASAN
	if (ret == KERN_SUCCESS) {
		ret = kasan_traverse_mappings(callback, context);
	}
#endif

	return ret;
}
560
/* Accumulator for the preflight traversal: counts the regions and total
 * bytes that the kernel coredump will contain. */
struct kern_dump_preflight_context {
	uint32_t region_count;
	uint64_t dumpable_bytes;
};
565
566 int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)567 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
568 vm_map_offset_t end,
569 void *context)
570 {
571 struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
572 IOReturn ret = KERN_SUCCESS;
573
574 kdc->region_count++;
575 kdc->dumpable_bytes += (end - start);
576
577 return ret;
578 }
579
580
/* Adapter state threaded through the segment-description traversal: the
 * coredump layer's callback plus its opaque context. */
struct kern_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};
585
586 int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)587 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
588 vm_map_offset_t end,
589 void *context)
590 {
591 struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
592 uint64_t seg_start = (uint64_t) start;
593 uint64_t seg_end = (uint64_t) end;
594
595 return kds_context->callback(seg_start, seg_end, kds_context->context);
596 }
597
/* Adapter state threaded through the segment-data traversal: the coredump
 * layer's data callback plus its opaque context. */
struct kern_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};
602
603 int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)604 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
605 vm_map_offset_t end,
606 void *context)
607 {
608 struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;
609
610 return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
611 }
612
/*
 * Pre-dump gate for the kernel coredump. If the excluded-regions mutex is
 * held (its owner may have been interrupted by the panic), traversing the
 * list would be unsafe, so the kernel dump is skipped: the per-arch panic
 * header flag is set, the paniclog is flushed, and KERN_NODE_DOWN is
 * returned. Returns KERN_SUCCESS when it is safe to proceed.
 */
static kern_return_t
kern_dump_init(__unused void *refcon, void *context)
{
	/* TODO: consider doing mmu flush from an init function */

	// If excluded regions list is locked, it is unsafe to dump the kernel.
	if (kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx)) {
		kern_coredump_log(context, "%s: skipping kernel because excluded regions list is locked\n",
		    __func__);
#if defined(__arm64__)
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#else
		panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#endif
		paniclog_flush();
		return KERN_NODE_DOWN;
	}

	return KERN_SUCCESS;
}
633
634 static int
kern_dump_save_summary(__unused void * refcon,core_save_summary_cb callback,void * context)635 kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
636 {
637 struct kern_dump_preflight_context kdc_preflight = { };
638 uint64_t thread_state_size = 0, thread_count = 0;
639 vm_map_offset_t vstart = kdp_core_start_addr();
640 kern_return_t ret;
641
642 ret = pmap_traverse_present_mappings(kernel_pmap,
643 vstart,
644 VM_MAX_KERNEL_ADDRESS,
645 kern_dump_pmap_traverse_preflight_callback,
646 &kdc_preflight);
647 if (ret != KERN_SUCCESS) {
648 kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
649 return ret;
650 }
651
652 kern_collectth_state_size(&thread_count, &thread_state_size);
653
654 ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
655 thread_count, thread_state_size, 0, context);
656 return ret;
657 }
658
659 static int
kern_dump_save_seg_descriptions(__unused void * refcon,core_save_segment_descriptions_cb callback,void * context)660 kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
661 {
662 vm_map_offset_t vstart = kdp_core_start_addr();
663 kern_return_t ret;
664 struct kern_dump_send_seg_desc_context kds_context;
665
666 kds_context.callback = callback;
667 kds_context.context = context;
668
669 ret = pmap_traverse_present_mappings(kernel_pmap,
670 vstart,
671 VM_MAX_KERNEL_ADDRESS,
672 kern_dump_pmap_traverse_send_segdesc_callback,
673 &kds_context);
674 if (ret != KERN_SUCCESS) {
675 kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
676 return ret;
677 }
678
679 return KERN_SUCCESS;
680 }
681
682 static int
kern_dump_save_thread_state(__unused void * refcon,void * buf,core_save_thread_state_cb callback,void * context)683 kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
684 {
685 kern_return_t ret;
686 uint64_t thread_state_size = 0, thread_count = 0;
687
688 kern_collectth_state_size(&thread_count, &thread_state_size);
689
690 if (thread_state_size > 0) {
691 void * iter = NULL;
692 do {
693 kern_collectth_state(current_thread(), buf, thread_state_size, &iter);
694
695 ret = callback(buf, context);
696 if (ret != KERN_SUCCESS) {
697 return ret;
698 }
699 } while (iter);
700 }
701
702 return KERN_SUCCESS;
703 }
704
705
/*
 * Coredump-layer version provider for the kernel dump: reports the kernel
 * text base address and kernel UUID (with no extra version flags) via
 * 'callback'.
 */
static int
kern_dump_save_sw_vers_detail(__unused void *refcon, core_save_sw_vers_detail_cb callback, void *context)
{
	return callback(vm_kernel_stext, kernel_uuid, 0, context);
}
711
712 static int
kern_dump_save_segment_data(__unused void * refcon,core_save_segment_data_cb callback,void * context)713 kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
714 {
715 vm_map_offset_t vstart = kdp_core_start_addr();
716 kern_return_t ret;
717 struct kern_dump_send_segdata_context kds_context;
718
719 kds_context.callback = callback;
720 kds_context.context = context;
721
722 ret = pmap_traverse_present_mappings(kernel_pmap,
723 vstart,
724 VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
725 if (ret != KERN_SUCCESS) {
726 kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
727 return ret;
728 }
729
730 return KERN_SUCCESS;
731 }
732
/*
 * Reset the shared output state and every stage in the chain before dumping
 * the next core. Applies the encryption policy: when encryption is requested
 * but unavailable and enforcement is on, *out_should_skip_coredump is set
 * (and the per-arch panic flag recorded); when encryption is not requested
 * but an encryption stage exists, that stage is bypassed.
 */
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_state, uint64_t totalbytes, bool encrypt_core, bool *out_should_skip_coredump)
{
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *current_stage = NULL;

	/* Re-initialize kdp_outstate */
	outstate->kcos_totalbytes = totalbytes;
	outstate->kcos_bytes_written = 0;
	outstate->kcos_lastpercent = 0;
	outstate->kcos_error = KERN_SUCCESS;

	/* Reset the output stages */
	STAILQ_FOREACH(current_stage, &outstate->kcos_out_stage, kos_next) {
		current_stage->kos_funcs.kosf_reset(current_stage);
	}

	*out_should_skip_coredump = false;
	if (encrypt_core) {
		/* enforcement without an available encryption stage: skip this core */
		if (outstate->kcos_enforce_encryption && !outstate->kcos_encryption_stage) {
			*out_should_skip_coredump = true;
#if defined(__arm64__)
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#else
			panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#endif
			kern_coredump_log(NULL, "(kdp_reset_output_vars) Encryption requested, is unavailable, and enforcement is active. Skipping current core.\n");
		}
	} else if (outstate->kcos_encryption_stage) {
		/* plaintext core: pass data through the encryption stage untouched */
		outstate->kcos_encryption_stage->kos_bypass = true;
	}

	return KERN_SUCCESS;
}
767
/*
 * Rewrite the corefile header at offset 0 through the first output stage:
 * seek to the start of the file, write the full header (including all
 * per-core detail records), then flush. On arm64 an explicit KDP_FLUSH is
 * issued as well. Returns the first failing stage status, or KERN_SUCCESS.
 */
static kern_return_t
kern_dump_update_header(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	kern_return_t ret;

	/* Write the file header -- first seek to the beginning of the file */
	foffset = 0;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header_size, kdp_core_header)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
		    kdp_core_header_size, kdp_core_header, ret);
		return ret;
	}

	/* zero-length write signals a data flush */
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
803
/*
 * Record one finished core in the corefile header's file table: captures the
 * bytes written by the last output stage as the file length, fills in the
 * next mach_core_details_v2 slot (flags, offset, length, truncated name),
 * bumps num_files, and rewrites the header on disk. On success,
 * *out_file_length receives the file's length.
 */
kern_return_t
kern_dump_record_file(void *kdp_core_out_state, const char *filename, uint64_t file_offset, uint64_t *out_file_length, uint64_t details_flags)
{
	kern_return_t ret = KERN_SUCCESS;
	uint64_t bytes_written = 0;
	struct mach_core_details_v2 *core_details = NULL;
	struct kdp_output_stage *last_stage;
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;

	assert(kdp_core_header->num_files < KERN_COREDUMP_MAX_CORES);
	assert(out_file_length != NULL);
	*out_file_length = 0;

	last_stage = STAILQ_LAST(&outstate->kcos_out_stage, kdp_output_stage, kos_next);
	bytes_written = last_stage->kos_bytes_written;

	core_details = &(kdp_core_header->files[kdp_core_header->num_files]);
	core_details->flags = details_flags;
	core_details->offset = file_offset;
	core_details->length = bytes_written;
	/* strncpy may leave the name unterminated; the next line guarantees it */
	strncpy((char *)&core_details->core_name, filename,
	    MACH_CORE_FILEHEADER_NAMELEN);
	core_details->core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';

	kdp_core_header->num_files++;

	ret = kern_dump_update_header(outstate);
	if (ret == KERN_SUCCESS) {
		*out_file_length = bytes_written;
	}

	return ret;
}
837
838 kern_return_t
kern_dump_seek_to_next_file(void * kdp_core_out_state,uint64_t next_file_offset)839 kern_dump_seek_to_next_file(void *kdp_core_out_state, uint64_t next_file_offset)
840 {
841 struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
842 struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
843 kern_return_t ret;
844
845 if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != KERN_SUCCESS) {
846 kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
847 sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
848 }
849
850 return ret;
851 }
852
853 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
854
/*
 * Write the coredump encryption public key into its reserved slot in the
 * corefile: seek to pub_key_offset, write the key, zero-pad to the reserved
 * block size, then write the same key+padding a second time as the "next"
 * key slot, and flush. No-op (KERN_SUCCESS) when no key is recorded in the
 * header. Returns the first failing stage status otherwise.
 */
static kern_return_t
kern_dump_write_public_key(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	uint64_t remainder = PUBLIC_KEY_RESERVED_LENGTH - kdp_core_header->pub_key_length;
	kern_return_t ret;

	if (kdp_core_header->pub_key_offset == 0 || kdp_core_header->pub_key_length == 0) {
		// Nothing to do
		return KERN_SUCCESS;
	}

	/* Write the public key -- first seek to the appropriate offset */
	foffset = kdp_core_header->pub_key_offset;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	// Write the public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	// Fill out the remainder of the block with zeroes
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	// Do it once more to write the "next" public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	/* zero-length write signals a data flush */
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
917
918 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
919
920 static kern_return_t
chain_output_stages(enum kern_dump_type kd_variant,struct kdp_core_out_state * outstate,uint64_t * details_flags)921 chain_output_stages(enum kern_dump_type kd_variant, struct kdp_core_out_state *outstate, uint64_t *details_flags)
922 {
923 struct kdp_output_stage *current = NULL;
924
925 assert(details_flags);
926 *details_flags = 0;
927
928 switch (kd_variant) {
929 case KERN_DUMP_STACKSHOT_DISK:
930 OS_FALLTHROUGH;
931 case KERN_DUMP_DISK:
932 #if defined(__arm64__)
933 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &memory_backing_aware_buffer_output_stage, kos_next);
934 #endif
935 if (!kdp_corezip_disabled) {
936 if (kdp_core_is_initializing_lz4_stage) {
937 kern_coredump_log(NULL, "We were in the middle of initializing LZ4 stage. Cannot write a coredump to disk\n");
938 return KERN_FAILURE;
939 } else if (!lz4_output_stage.kos_initialized) {
940 kern_coredump_log(NULL, "LZ4 stage is not yet initialized. Cannot write a coredump to disk\n");
941 return KERN_FAILURE;
942 }
943 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &lz4_output_stage, kos_next);
944 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_LZ4;
945 }
946 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
947 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
948 if (kdp_core_is_initializing_encryption_stage) {
949 kern_coredump_log(NULL, "We were in the middle of initializing encryption. Marking it as unavailable\n");
950 } else if (aea_output_stage.kos_initialized) {
951 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &aea_output_stage, kos_next);
952 outstate->kcos_encryption_stage = &aea_output_stage;
953 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_ENCRYPTED_AEA;
954 }
955 outstate->kcos_enforce_encryption = kern_dump_should_enforce_encryption();
956 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
957 if (kdp_core_is_initializing_disk_stage) {
958 kern_coredump_log(NULL, "We were in the middle of initializing the disk stage. Cannot write a coredump to disk\n");
959 return KERN_FAILURE;
960 } else if (disk_output_stage.kos_initialized == false) {
961 kern_coredump_log(NULL, "Corefile is not yet initialized. Cannot write a coredump to disk\n");
962 return KERN_FAILURE;
963 }
964 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &disk_output_stage, kos_next);
965 break;
966 case KERN_DUMP_NET:
967 if (!kdp_corezip_disabled) {
968 if (!zlib_output_stage.kos_initialized) {
969 kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to the network\n");
970 return KERN_FAILURE;
971 }
972 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
973 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
974 }
975 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
976 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &buffer_output_stage, kos_next);
977 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &net_output_stage, kos_next);
978 break;
979 #if defined(__arm64__)
980 case KERN_DUMP_HW_SHMEM_DBG:
981 if (!kdp_corezip_disabled) {
982 if (!zlib_output_stage.kos_initialized) {
983 kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to shared memory\n");
984 return KERN_FAILURE;
985 }
986 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
987 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
988 }
989 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &shmem_output_stage, kos_next);
990 break;
991 #endif /* defined(__arm64__) */
992 }
993
994 STAILQ_FOREACH(current, &outstate->kcos_out_stage, kos_next) {
995 current->kos_outstate = outstate;
996 }
997
998 return KERN_SUCCESS;
999 }
1000
1001 #if defined(__arm64__)
1002 static kern_return_t
dump_panic_buffer(struct kdp_core_out_state * outstate,char * panic_buf,size_t panic_len,uint64_t * foffset,uint64_t details_flags)1003 dump_panic_buffer(struct kdp_core_out_state *outstate, char *panic_buf, size_t panic_len,
1004 uint64_t *foffset, uint64_t details_flags)
1005 {
1006 kern_return_t ret = KERN_SUCCESS;
1007 bool should_skip = false;
1008
1009 kern_coredump_log(NULL, "\nBeginning dump of panic region of size 0x%zx\n", panic_len);
1010
1011 ret = kdp_reset_output_vars(outstate, panic_len, true, &should_skip);
1012 if (KERN_SUCCESS != ret) {
1013 return ret;
1014 }
1015
1016 if (should_skip) {
1017 kern_coredump_log(NULL, "Skipping panic region dump\n");
1018 return ret;
1019 }
1020
1021 uint64_t compressed_panic_region_len = 0;
1022 ret = kdp_core_output(outstate, panic_len, panic_buf);
1023 if (KERN_SUCCESS != ret) {
1024 kern_coredump_log(NULL, "Failed to write panic region to file, kdp_coreoutput(outstate, %zu, %p) returned 0x%x\n",
1025 panic_len, panic_buf, ret);
1026 return ret;
1027 }
1028
1029 ret = kdp_core_output(outstate, 0, NULL);
1030 if (KERN_SUCCESS != ret) {
1031 kern_coredump_log(NULL, "Failed to flush panic region data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", outstate, ret);
1032 return ret;
1033 }
1034
1035 ret = kern_dump_record_file(outstate, "panic_region", *foffset, &compressed_panic_region_len,
1036 details_flags);
1037 if (KERN_SUCCESS != ret) {
1038 kern_coredump_log(NULL, "Failed to record panic region in corefile header, kern_dump_record_file returned 0x%x\n", ret);
1039 return ret;
1040 }
1041
1042 kern_coredump_log(NULL, "Recorded panic region in corefile at offset 0x%llx, compressed to %llu bytes\n", *foffset, compressed_panic_region_len);
1043 *foffset = roundup((*foffset + compressed_panic_region_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1044
1045 ret = kern_dump_seek_to_next_file(outstate, *foffset);
1046 if (KERN_SUCCESS != ret) {
1047 kern_coredump_log(NULL, "Failed to seek to panic region file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", *foffset, ret);
1048 return ret;
1049 }
1050
1051 return ret;
1052 }
1053 #endif /* defined(__arm64__) */
1054
/*
 * Perform a coredump of the requested variant through the already-chained
 * output stages. For disk variants this also lays out and writes the corefile
 * header region, the public key slots (when encryption is configured), the
 * panic log and the coredump log. Returns 0 on success, -1 on failure, and
 * records the outcome in the panic header flags.
 */
static int
do_kern_dump(enum kern_dump_type kd_variant)
{
	struct kdp_core_out_state outstate = { };
	struct kdp_output_stage *first_stage = NULL;
	char *coredump_log_start = NULL, *buf = NULL;
	size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
	uint64_t foffset = 0;
	kern_return_t ret = KERN_SUCCESS;
	boolean_t output_opened = FALSE, dump_succeeded = TRUE;
	uint64_t details_flags = 0;

	/* Initialize output context */

	bzero(&outstate, sizeof(outstate));
	STAILQ_INIT(&outstate.kcos_out_stage);
	ret = chain_output_stages(kd_variant, &outstate, &details_flags);
	if (KERN_SUCCESS != ret) {
		dump_succeeded = FALSE;
		goto exit;
	}
	first_stage = STAILQ_FIRST(&outstate.kcos_out_stage);

	/*
	 * Record the initial panic log buffer length so we can dump the coredump log
	 * and panic log to disk
	 */
	coredump_log_start = debug_buf_ptr;
#if defined(__arm64__)
	assert(panic_info->eph_other_log_offset != 0);
	assert(panic_info->eph_panic_log_len != 0);
	/* Include any data from before the panic log as well */
	prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
	    panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm64__) */
	/* On x86 there may be no panic log at all (e.g. Debugger() without panic) */
	if (panic_info->mph_panic_log_offset != 0) {
		prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
		    panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
	}
#endif /* defined(__arm64__) */

	assert(prior_debug_logsize <= debug_buf_size);

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		/* Open the file for output */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_WRQ, NULL, 0, NULL)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}
	/* From here on, the exit path must send KDP_EOF to close the output */
	output_opened = true;

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		const size_t aligned_corefile_header_size = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Two reserved key slots: the current key and the "next" key */
		const size_t aligned_public_key_size = PUBLIC_KEY_RESERVED_LENGTH * 2;
#else
		const size_t aligned_public_key_size = 0;
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;

		/* Space for file header, public key, panic log, core log */
		foffset = roundup(aligned_corefile_header_size + aligned_public_key_size + reserved_debug_logsize, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		kdp_core_header->log_offset = aligned_corefile_header_size + aligned_public_key_size;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Write the public key */
		ret = kern_dump_write_public_key(&outstate);
		if (KERN_SUCCESS != ret) {
			kern_coredump_log(NULL, "(do_kern_dump write public key) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		/* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}

#if defined(__arm64__)
	flush_mmu_tlb();
#endif

	kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores...\n" :
	    "Transmitting kernel state, please wait:\n");

#if defined (__arm64__)
	/* Dump the raw panic region first, as its own embedded file */
	char *panic_buf = (char *)gPanicBase;
	size_t panic_len = (vm_offset_t)debug_buf_ptr - gPanicBase;
	if (kd_variant == KERN_DUMP_DISK && (panic_buf && panic_len)) {
		ret = dump_panic_buffer(&outstate, panic_buf, panic_len, &foffset, details_flags);
		if (KERN_SUCCESS != ret) {
			dump_succeeded = FALSE;
		}
	}
#endif

#if defined(__x86_64__)
	/* On x86, dump the kernel stackshot captured at panic time (if any) */
	if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
		bool should_skip = false;

		kern_coredump_log(NULL, "\nBeginning dump of kernel stackshot\n");

		ret = kdp_reset_output_vars(&outstate, panic_stackshot_len, true, &should_skip);

		if (ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to reset outstate for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
			dump_succeeded = FALSE;
		} else if (!should_skip) {
			uint64_t compressed_stackshot_len = 0;
			/* Write, flush, record in the header, then seek to the next aligned file offset */
			if ((ret = kdp_core_output(&outstate, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outstate, %lu, %p) returned 0x%x\n",
				    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_core_output(&outstate, 0, NULL)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outstate, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kern_dump_record_file(&outstate, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len, details_flags)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
				dump_succeeded = FALSE;
			} else {
				kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
				foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
				if ((ret = kern_dump_seek_to_next_file(&outstate, foffset)) != KERN_SUCCESS) {
					kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
					dump_succeeded = FALSE;
				}
			}
		} else {
			kern_coredump_log(NULL, "Skipping stackshot dump\n");
		}
	}
#endif

	if (kd_variant == KERN_DUMP_DISK) {
		/*
		 * Dump co-processors as well, foffset will be overwritten with the
		 * offset of the next location in the file to be written to.
		 */
		if (kern_do_coredump(&outstate, FALSE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	} else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
		/* Only the kernel */
		if (kern_do_coredump(&outstate, TRUE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	}

	if (kd_variant == KERN_DUMP_DISK) {
		assert(reserved_debug_logsize != 0);
		size_t remaining_debug_logspace = reserved_debug_logsize;

		/* Write the debug log -- first seek to the end of the corefile header */
		foffset = kdp_core_header->log_offset;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/* First flush the data from just the paniclog */
		size_t initial_log_length = 0;
#if defined(__arm64__)
		initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
		    panic_info->eph_panic_log_len;
#else
		if (panic_info->mph_panic_log_offset != 0) {
			initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
			    panic_info->mph_panic_log_len;
		}
#endif

		buf = debug_buf_base;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, initial_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    initial_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		remaining_debug_logspace -= initial_log_length;

		/* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm64__)
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
		/*
		 * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86 if debugger_is_panic was
		 * configured to FALSE based on the boot-args. In that case just start from where the debug buffer was when
		 * we began taking a coredump.
		 */
		if (panic_info->mph_other_log_offset != 0) {
			buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
		} else {
			buf = coredump_log_start;
		}
#endif
		assert(debug_buf_ptr >= buf);

		/* Truncate the coredump log if it would overflow the reserved region */
		size_t other_log_length = debug_buf_ptr - buf;
		if (other_log_length > remaining_debug_logspace) {
			other_log_length = remaining_debug_logspace;
		}

		/* Write the coredump log */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, other_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    other_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/* Scroll back and rewrite the corefile header with the final log length */
		kdp_core_header->log_length = initial_log_length + other_log_length;
		kern_dump_update_header(&outstate);
	}

exit:
	/* close / last packet */
	if (output_opened && (ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_EOF, NULL, 0, ((void *) 0))) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
		dump_succeeded = FALSE;
	}

	/* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm64__)
	panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
	    EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
	paniclog_flush();
#else
	if (panic_info->mph_panic_log_offset != 0) {
		panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
		    MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
		paniclog_flush();
	}
#endif

	return dump_succeeded ? 0 : -1;
}
1302
/* Returns whether a kernel coredump has completed successfully this boot. */
boolean_t
dumped_kernel_core(void)
{
	return kern_dump_successful;
}
1308
/*
 * Public entry point for taking a kernel coredump of the given variant.
 * Disk variants are taken at most once: a successful local dump is never
 * repeated, and a dump already in progress returns -1.
 * Returns 0 on success, -1 on failure.
 */
int
kern_dump(enum kern_dump_type kd_variant)
{
	static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
	int ret = -1;
#if KASAN
	/* KASAN instrumentation must be off while the debugger walks memory */
	kasan_kdp_disable();
#endif
	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		if (dumped_local) {
			return 0;
		}
		if (local_dump_in_progress) {
			return -1;
		}
		local_dump_in_progress = TRUE;
		ret = do_kern_dump(kd_variant);
		/*
		 * NOTE(review): local_dump_in_progress is only cleared on success, so a
		 * failed disk dump permanently blocks later disk attempts (they return
		 * -1). Presumably intentional (one attempt per panic) — confirm.
		 */
		if (ret == 0) {
			dumped_local = TRUE;
			kern_dump_successful = TRUE;
			local_dump_in_progress = FALSE;
		}

		return ret;
#if defined(__arm64__)
	} else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
#endif
	} else {
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
	}
}
1349
/*
 * One-time initialization of all coredump output stages at boot.
 * Stages that depend on late-arriving components (disk corefile, LZ4 kext,
 * AEA encryption) are only zeroed here and initialized later via their
 * respective availability callbacks. Returns the first failing stage's
 * status, or KERN_SUCCESS.
 */
static kern_return_t
kdp_core_init_output_stages(void)
{
	kern_return_t ret = KERN_SUCCESS;

	// We only zero-out the disk stage. It will be initialized
	// later on when the corefile is initialized
	bzero(&disk_output_stage, sizeof(disk_output_stage));

	// We only zero-out the LZ4 stage. It will be initialized
	// later on when the kext is loaded.
	bzero(&lz4_output_stage, sizeof(lz4_output_stage));
	lz4_stage_monitor_availability();

	// We only initialize the zlib output stage if we can reach the debugger.
	// This saves us from wasting some wired memory that will never be used
	// in other configurations.
	bzero(&zlib_output_stage, sizeof(zlib_output_stage));
	if (debug_boot_arg && (debug_boot_arg & DB_REBOOT_ALWAYS) == 0) {
		ret = zlib_stage_initialize(&zlib_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}

	// Network path buffers output into crashdump-packet-sized chunks
	bzero(&buffer_output_stage, sizeof(buffer_output_stage));
	ret = buffer_stage_initialize(&buffer_output_stage, kdp_crashdump_pkt_size);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&net_output_stage, sizeof(net_output_stage));
	ret = net_stage_initialize(&net_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&progress_notify_output_stage, sizeof(progress_notify_output_stage));
	ret = progress_notify_stage_initialize(&progress_notify_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	// We only zero-out the AEA stage. It will be initialized
	// later on, if it's supported and needed
	bzero(&aea_output_stage, sizeof(aea_output_stage));
	aea_stage_monitor_availability();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm64__)
	bzero(&shmem_output_stage, sizeof(shmem_output_stage));
	if (PE_consistent_debug_enabled() && PE_i_can_has_debugger(NULL)) {
		ret = shmem_stage_initialize(&shmem_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}
#endif /* defined(__arm64__) */

#if defined(__arm64__)
	bzero(&memory_backing_aware_buffer_output_stage, sizeof(memory_backing_aware_buffer_output_stage));
	ret = memory_backing_aware_buffer_stage_initialize(&memory_backing_aware_buffer_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
1420
1421 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1422
1423 static bool
kern_dump_should_enforce_encryption(void)1424 kern_dump_should_enforce_encryption(void)
1425 {
1426 static int enforce_encryption = -1;
1427
1428 // Only check once
1429 if (enforce_encryption == -1) {
1430 uint32_t coredump_encryption_flags = 0;
1431
1432 // When set, the boot-arg is the sole decider
1433 if (!kernel_debugging_restricted() &&
1434 PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags))) {
1435 enforce_encryption = (coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT) != 0 ? 1 : 0;
1436 } else {
1437 enforce_encryption = 0;
1438 }
1439 }
1440
1441 return enforce_encryption != 0;
1442 }
1443
1444 static bool
kern_dump_is_encryption_available(void)1445 kern_dump_is_encryption_available(void)
1446 {
1447 // Default to feature enabled unless boot-arg says otherwise
1448 uint32_t coredump_encryption_flags = COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY;
1449
1450 if (!kernel_debugging_restricted()) {
1451 PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags));
1452 }
1453
1454 if ((coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY) == 0) {
1455 return false;
1456 }
1457
1458 return aea_stage_is_available();
1459 }
1460
1461 /*
1462 * Initialize (or de-initialize) the encryption stage. This is done in a way such that if initializing the
1463 * encryption stage with a new key fails, then the existing encryption stage is left untouched. Once
1464 * the new stage is initialized, the old stage is uninitialized.
1465 *
1466 * This function is called whenever we have a new public key (whether from someone calling our sysctl, or because
1467 * we read it out of a corefile), or when encryption becomes available.
1468 *
1469 * Parameters:
1470 * - public_key: The public key to use when initializing the encryption stage. Can be NULL to indicate that
1471 * the encryption stage should be de-initialized.
1472 * - public_key_size: The size of the given public key.
1473 */
1474 static kern_return_t
kdp_core_init_encryption_stage(void * public_key,size_t public_key_size)1475 kdp_core_init_encryption_stage(void *public_key, size_t public_key_size)
1476 {
1477 kern_return_t ret = KERN_SUCCESS;
1478 struct kdp_output_stage new_encryption_stage = {};
1479 struct kdp_output_stage old_encryption_stage = {};
1480
1481 lck_mtx_assert(kdp_core_encryption_stage_lock, LCK_MTX_ASSERT_OWNED);
1482
1483 bzero(&new_encryption_stage, sizeof(new_encryption_stage));
1484
1485 if (public_key && kern_dump_is_encryption_available()) {
1486 ret = aea_stage_initialize(&new_encryption_stage, public_key, public_key_size);
1487 if (KERN_SUCCESS != ret) {
1488 printf("(kdp_core_init_encryption_stage) Failed to initialize the encryption stage. Error 0x%x\n", ret);
1489 return ret;
1490 }
1491 }
1492
1493 bcopy(&aea_output_stage, &old_encryption_stage, sizeof(aea_output_stage));
1494
1495 bcopy(&new_encryption_stage, &aea_output_stage, sizeof(new_encryption_stage));
1496
1497 if (old_encryption_stage.kos_initialized && old_encryption_stage.kos_funcs.kosf_free) {
1498 old_encryption_stage.kos_funcs.kosf_free(&old_encryption_stage);
1499 }
1500
1501 return KERN_SUCCESS;
1502 }
1503
/*
 * Install a new coredump public key (or remove the key when the descriptor's
 * key pointer is NULL): rebuild the encryption stage, update the in-memory
 * corefile header, and persist the key into the on-disk corefile header via
 * the access_data callback. On return, the descriptor holds the OLD key for
 * the caller to free. Returns the first hard failure; some late steps are
 * deliberately best-effort (see comments below).
 */
kern_return_t
kdp_core_handle_new_encryption_key(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context)
{
	kern_return_t ret = KERN_SUCCESS;
	struct kdp_core_encryption_key_descriptor *key_descriptor = (struct kdp_core_encryption_key_descriptor *) recipient_context;
	void *old_public_key = NULL;
	size_t old_public_key_size = 0;

	if (!key_descriptor) {
		return kIOReturnBadArgument;
	}

	lck_mtx_lock(kdp_core_encryption_stage_lock);
	/* Flag lets a concurrent panic-time dump know encryption is mid-update */
	kdp_core_is_initializing_encryption_stage = true;

	/* do { } while (0): 'break' below bails out to the common unlock path */
	do {
		// Do the risky part first, and bail out cleanly if it fails
		ret = kdp_core_init_encryption_stage(key_descriptor->kcekd_key, key_descriptor->kcekd_size);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to re-initialize encryption stage. Error 0x%x\n", ret);
			break;
		}

		// The rest of this function should technically never fail

		old_public_key = kdp_core_public_key;
		old_public_key_size = kdp_core_header->pub_key_length;

		/* Swap in the new key and refresh the in-memory header's key fields */
		kdp_core_public_key = key_descriptor->kcekd_key;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_EXISTING_COREFILE_KEY_FORMAT_MASK;
		if (key_descriptor->kcekd_key) {
			kdp_core_header->flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(key_descriptor->kcekd_format);
			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
			kdp_core_header->pub_key_length = key_descriptor->kcekd_size;
		} else {
			kdp_core_header->pub_key_offset = 0;
			kdp_core_header->pub_key_length = 0;
		}

		/*
		 * Return the old key to the caller to free
		 */
		key_descriptor->kcekd_key = old_public_key;
		key_descriptor->kcekd_size = (uint16_t)old_public_key_size;

		// If this stuff fails, we have bigger problems
		struct mach_core_fileheader_v2 existing_header;
		bool used_existing_header = false;
		ret = access_data(access_context, FALSE, 0, sizeof(existing_header), &existing_header);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to read the existing corefile header. Error 0x%x\n", ret);
			break;
		}

		/*
		 * Prefer patching the existing on-disk header in place when it is a
		 * compatible v2 header whose key region can accept the new key; only
		 * rewrite the whole header otherwise.
		 */
		if (existing_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
		    && existing_header.version == 2
		    && (existing_header.pub_key_length == 0
		    || kdp_core_header->pub_key_length == 0
		    || existing_header.pub_key_length == kdp_core_header->pub_key_length)) {
			used_existing_header = true;
			existing_header.flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

			if (kdp_core_public_key) {
				existing_header.flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

				if (existing_header.pub_key_offset == 0) {
					existing_header.pub_key_offset = kdp_core_header->pub_key_offset;
					existing_header.pub_key_length = kdp_core_header->pub_key_length;
				}
			}

			ret = access_data(access_context, TRUE, 0, sizeof(existing_header), &existing_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to update the existing corefile header. Error 0x%x\n", ret);
				break;
			}
		} else {
			ret = access_data(access_context, TRUE, 0, sizeof(struct mach_core_fileheader_v2), kdp_core_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the corefile header. Error 0x%x\n", ret);
				break;
			}
		}

		if (kdp_core_header->pub_key_length) {
			/* The "next" key lives one reserved slot past the current key */
			uint64_t offset = used_existing_header ? existing_header.pub_key_offset : kdp_core_header->pub_key_offset;
			ret = access_data(access_context, TRUE, offset + PUBLIC_KEY_RESERVED_LENGTH, kdp_core_header->pub_key_length, kdp_core_public_key);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the next public key. Error 0x%x\n", ret);
				break;
			}

			if (!used_existing_header) {
				// Everything that happens here is optional. It's not the end of the world if this stuff fails, so we don't return
				// any errors
				// Since we're writing out a completely new header, we make sure to zero-out the region that's reserved for the public key.
				// This allows us consumers of the corefile to know for sure that this corefile is not encrypted (yet). Once we actually
				// write out a corefile, we'll overwrite this region with the key that we ended up using at the time.
				// If we fail to zero-out this region, consumers would read garbage data and properly fail to interpret it as a public key,
				// which is why it is OK for us to fail here (it's hard to interpret garbage data as a valid key, and even then, they wouldn't
				// find a matching private key anyway)
				void *empty_key = NULL;
				kern_return_t temp_ret = KERN_SUCCESS;

				empty_key = kalloc_data(PUBLIC_KEY_RESERVED_LENGTH,
				    Z_WAITOK | Z_ZERO | Z_NOFAIL);

				temp_ret = access_data(access_context, TRUE, offset, PUBLIC_KEY_RESERVED_LENGTH, empty_key);
				kfree_data(empty_key, PUBLIC_KEY_RESERVED_LENGTH);

				/* Best-effort: 'ret' is not updated, so this does not fail the call */
				if (temp_ret != KERN_SUCCESS) {
					printf("kdp_core_handle_new_encryption_key failed to zero-out the public key region. Error 0x%x\n", temp_ret);
					break;
				}
			}
		}
	} while (0);

	kdp_core_is_initializing_encryption_stage = false;
	lck_mtx_unlock(kdp_core_encryption_stage_lock);

	return ret;
}
1629
1630 kern_return_t
kdp_core_handle_encryption_available(void)1631 kdp_core_handle_encryption_available(void)
1632 {
1633 kern_return_t ret;
1634
1635 lck_mtx_lock(kdp_core_encryption_stage_lock);
1636 kdp_core_is_initializing_encryption_stage = true;
1637
1638 ret = kdp_core_init_encryption_stage(kdp_core_public_key, kdp_core_header->pub_key_length);
1639
1640 kdp_core_is_initializing_encryption_stage = false;
1641 lck_mtx_unlock(kdp_core_encryption_stage_lock);
1642
1643 return ret;
1644 }
1645
1646 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1647
1648 kern_return_t
kdp_core_handle_lz4_available(void)1649 kdp_core_handle_lz4_available(void)
1650 {
1651 kern_return_t ret;
1652 lck_mtx_lock(kdp_core_lz4_stage_lock);
1653 kdp_core_is_initializing_lz4_stage = true;
1654
1655 ret = lz4_stage_initialize(&lz4_output_stage);
1656
1657 kdp_core_is_initializing_lz4_stage = false;
1658 lck_mtx_unlock(kdp_core_lz4_stage_lock);
1659
1660 return ret;
1661 }
1662
1663 kern_return_t
kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data,void * access_context,__unused void * recipient_context)1664 kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, __unused void *recipient_context)
1665 {
1666 kern_return_t ret = KERN_SUCCESS;
1667
1668 lck_mtx_lock(kdp_core_disk_stage_lock);
1669 kdp_core_is_initializing_disk_stage = true;
1670
1671 ret = disk_stage_initialize(&disk_output_stage);
1672
1673 kdp_core_is_initializing_disk_stage = false;
1674 lck_mtx_unlock(kdp_core_disk_stage_lock);
1675
1676 if (KERN_SUCCESS != ret) {
1677 return ret;
1678 }
1679
1680 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1681 // If someone has already provided a new public key,
1682 // there's no sense in reading the old one from the corefile.
1683 if (kdp_core_public_key != NULL) {
1684 return KERN_SUCCESS;
1685 }
1686
1687 // The kernel corefile is now available. Let's try to retrieve the public key from its
1688 // header (if available and supported).
1689
1690 // First let's read the corefile header itself
1691 struct mach_core_fileheader_v2 temp_header = {};
1692 ret = access_data(access_context, FALSE, 0, sizeof(temp_header), &temp_header);
1693 if (KERN_SUCCESS != ret) {
1694 printf("kdp_core_polled_io_polled_file_available failed to read corefile header. Error 0x%x\n", ret);
1695 return ret;
1696 }
1697
1698 // Check if the corefile header is initialized, and whether it's initialized to values that we support
1699 // (for backwards and forwards) compatibility, and check whether the header indicates that the corefile has
1700 // has a public key stashed inside of it.
1701 if (temp_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
1702 && temp_header.version == 2
1703 && temp_header.pub_key_offset != 0
1704 && temp_header.pub_key_length != 0
1705 /* Future-proofing: make sure it's the key format that we support */
1706 && (temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK) == MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256
1707 /* Add some extra sanity checks. These are not necessary */
1708 && temp_header.pub_key_length <= 4096
1709 && temp_header.pub_key_offset < 65535) {
1710 // The corefile header is properly initialized, is supported, and contains a public key.
1711 // Let's adopt that public key for our encryption needs
1712 void *public_key = NULL;
1713
1714 public_key = kalloc_data(temp_header.pub_key_length,
1715 Z_ZERO | Z_WAITOK | Z_NOFAIL);
1716
1717 // Read the public key from the corefile. Note that the key we're trying to adopt is the "next" key, which is
1718 // PUBLIC_KEY_RESERVED_LENGTH bytes after the public key.
1719 ret = access_data(access_context, FALSE, temp_header.pub_key_offset + PUBLIC_KEY_RESERVED_LENGTH, temp_header.pub_key_length, public_key);
1720 if (KERN_SUCCESS != ret) {
1721 printf("kdp_core_polled_io_polled_file_available failed to read the public key. Error 0x%x\n", ret);
1722 kfree_data(public_key, temp_header.pub_key_length);
1723 return ret;
1724 }
1725
1726 lck_mtx_lock(kdp_core_encryption_stage_lock);
1727 kdp_core_is_initializing_encryption_stage = true;
1728
1729 ret = kdp_core_init_encryption_stage(public_key, temp_header.pub_key_length);
1730 if (KERN_SUCCESS == ret) {
1731 kdp_core_header->flags |= temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1732 kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(temp_header.flags);
1733 kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1734 kdp_core_header->pub_key_length = temp_header.pub_key_length;
1735 kdp_core_public_key = public_key;
1736 }
1737
1738 kdp_core_is_initializing_encryption_stage = false;
1739 lck_mtx_unlock(kdp_core_encryption_stage_lock);
1740 }
1741 #else
1742 #pragma unused(access_data, access_context)
1743 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1744
1745 return ret;
1746 }
1747
1748 kern_return_t
kdp_core_polled_io_polled_file_unavailable(void)1749 kdp_core_polled_io_polled_file_unavailable(void)
1750 {
1751 lck_mtx_lock(kdp_core_disk_stage_lock);
1752 kdp_core_is_initializing_disk_stage = true;
1753
1754 if (disk_output_stage.kos_initialized && disk_output_stage.kos_funcs.kosf_free) {
1755 disk_output_stage.kos_funcs.kosf_free(&disk_output_stage);
1756 }
1757
1758 kdp_core_is_initializing_disk_stage = false;
1759 lck_mtx_unlock(kdp_core_disk_stage_lock);
1760
1761 return KERN_SUCCESS;
1762 }
1763
1764 void
kdp_core_init(void)1765 kdp_core_init(void)
1766 {
1767 kern_return_t kr;
1768 kern_coredump_callback_config core_config = { };
1769
1770 /* Initialize output stages */
1771 kr = kdp_core_init_output_stages();
1772 assert(KERN_SUCCESS == kr);
1773
1774 kmem_alloc(kernel_map, (vm_offset_t*)&kdp_core_header,
1775 kdp_core_header_size,
1776 KMA_NOFAIL | KMA_ZERO | KMA_PERMANENT | KMA_KOBJECT | KMA_DATA,
1777 VM_KERN_MEMORY_DIAG);
1778
1779 kdp_core_header->signature = MACH_CORE_FILEHEADER_V2_SIGNATURE;
1780 kdp_core_header->version = 2;
1781
1782 kdp_core_initialization_lock_group = lck_grp_alloc_init("KDPCoreStageInit", LCK_GRP_ATTR_NULL);
1783 kdp_core_disk_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);
1784
1785 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1786 kdp_core_encryption_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);
1787
1788 (void) kern_dump_should_enforce_encryption();
1789 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1790
1791 kdp_core_lz4_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);
1792
1793 core_config.kcc_coredump_init = kern_dump_init;
1794 core_config.kcc_coredump_get_summary = kern_dump_save_summary;
1795 core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
1796 core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
1797 core_config.kcc_coredump_save_sw_vers_detail = kern_dump_save_sw_vers_detail;
1798 core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
1799
1800 kr = kern_register_xnu_coredump_helper(&core_config);
1801 assert(KERN_SUCCESS == kr);
1802 }
1803
1804 #else
1805
/*
 * Stub for builds without CONFIG_KDP_INTERACTIVE_DEBUGGING: region exclusion
 * is unavailable, so always report KERN_NOT_SUPPORTED.
 */
kern_return_t
kdp_core_exclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
	return KERN_NOT_SUPPORTED;
}
1811
/*
 * Stub for builds without CONFIG_KDP_INTERACTIVE_DEBUGGING: region
 * un-exclusion is unavailable, so always report KERN_NOT_SUPPORTED.
 */
kern_return_t
kdp_core_unexclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
	return KERN_NOT_SUPPORTED;
}
1817
1818 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1819