1 /*
2 * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * The main orchestrator for kernel (and co-processor) coredumps. Here's a very simplistic view of
31 * the flow:
32 *
33 * At kernel initialization time (kdp_core_init):
34 * ----------------------------------------------
35 *
36 * - kdp_core_init() takes care of allocating all necessary data structures and initializes the
37 * coredump output stages
38 *
39 * At coredump time (do_kern_dump):
40 * --------------------------------
41 *
42 * - Depending on the coredump variant, we chain the necessary output stages together in chain_output_stages()
43 * - [Disk only] We initialize the corefile header
44 * - [Disk only] We stream the stackshot out through the output stages and update the corefile header
45 * - We perform the kernel coredump, streaming it out through the output stages
46 * - [Disk only] We update the corefile header
47 * - [Disk only] We perform the co-processor coredumps (driven by kern_do_coredump), streaming each out
48 * through the output stages and updating the corefile header.
49 * - [Disk only] We save the coredump log to the corefile
50 */
51
52 #include <mach/kern_return.h>
53 #include <mach/vm_types.h>
54 #include <kdp/core_exclude.h>
55 #include <kdp/kdp_core.h>
56 #include <kdp/core_notes.h>
57
58 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
59
60 #include <mach/mach_types.h>
61 #include <mach/vm_attributes.h>
62 #include <mach/vm_param.h>
63 #include <mach/vm_map.h>
64 #include <vm/vm_protos.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_map.h>
67 #include <machine/cpu_capabilities.h>
68 #include <libsa/types.h>
69 #include <libkern/kernel_mach_header.h>
70 #include <kern/locks.h>
71 #include <kdp/kdp_internal.h>
72 #include <kdp/output_stages/output_stages.h>
73 #include <kdp/processor_core.h>
74 #include <IOKit/IOTypes.h>
75 #include <IOKit/IOBSD.h>
76 #include <sys/errno.h>
77 #include <sys/msgbuf.h>
78 #include <san/kasan.h>
79 #include <kern/debug.h>
80 #include <pexpert/pexpert.h>
81 #include <os/atomic_private.h>
82
83
84 #if defined(__x86_64__)
85 #include <i386/pmap_internal.h>
86 #include <kdp/ml/i386/kdp_x86_common.h>
87 #include <kern/debug.h>
88 #endif /* defined(__x86_64__) */
89
90
91 kern_return_t kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context);
92 kern_return_t kdp_core_polled_io_polled_file_unavailable(void);
93
94 typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
95 vm_map_offset_t end,
96 void *context);
97
98 static kern_return_t kern_dump_init(void *refcon, void *context);
99 static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
100 static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
101 static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
102 static int kern_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context);
103 static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);
104 static kern_return_t kern_dump_save_note_summary(void *refcon, core_save_note_summary_cb callback, void *context);
105 static kern_return_t kern_dump_save_note_descriptions(void *refcon, core_save_note_descriptions_cb callback, void *context);
106 static kern_return_t kern_dump_save_note_data(void *refcon, core_save_note_data_cb callback, void *context);
107
108 static int
109 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
110 vm_map_offset_t end,
111 void *context);
112 static int
113 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
114 vm_map_offset_t end,
115 void *context);
116
117 static int
118 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
119 vm_map_offset_t end,
120 void *context);
121
/*
 * Preallocated output stages; the appropriate subset is chained together at
 * coredump time depending on the coredump variant being taken.
 */
static struct kdp_output_stage disk_output_stage = {};
static struct kdp_output_stage lz4_output_stage = {};
static struct kdp_output_stage zlib_output_stage = {};
static struct kdp_output_stage buffer_output_stage = {};
static struct kdp_output_stage net_output_stage = {};
static struct kdp_output_stage progress_notify_output_stage = {};
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static struct kdp_output_stage aea_output_stage = {};
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
#if defined(__arm64__)
static struct kdp_output_stage shmem_output_stage = {};
static struct kdp_output_stage memory_backing_aware_buffer_output_stage = {};
#endif /* defined(__arm64__) */

/* Maximum KDP crashdump packet size (defined by the KDP transport layer). */
extern uint32_t kdp_crashdump_pkt_size;

static boolean_t kern_dump_successful = FALSE;

/* Corefile header size: fixed v2 header plus one details slot per possible core. */
static const size_t kdp_core_header_size = sizeof(struct mach_core_fileheader_v2) + (KERN_COREDUMP_MAX_CORES * sizeof(struct mach_core_details_v2));
static struct mach_core_fileheader_v2 *kdp_core_header = NULL;

/* Lock group plus lock/flag pair guarding disk output stage initialization. */
static lck_grp_t *kdp_core_initialization_lock_group = NULL;
static lck_mtx_t *kdp_core_disk_stage_lock = NULL;
static bool kdp_core_is_initializing_disk_stage = false;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
/* Bytes reserved in the corefile for public key material (see kern_dump_write_public_key). */
static const size_t PUBLIC_KEY_RESERVED_LENGTH = roundup(4096, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
static void *kdp_core_public_key = NULL;
/* Lock/flag pair guarding encryption (AEA) output stage initialization. */
static lck_mtx_t *kdp_core_encryption_stage_lock = NULL;
static bool kdp_core_is_initializing_encryption_stage = false;

static bool kern_dump_should_enforce_encryption(void);
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

/* Lock/flag pair guarding LZ4 output stage initialization. */
static lck_mtx_t *kdp_core_lz4_stage_lock = NULL;
static bool kdp_core_is_initializing_lz4_stage = false;

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk. A nonzero address causes that range to be skipped during the
 * kernel dump (see kernel_pmap_present_mapping).
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

/* Override bits consulted by the encryption availability/enforcement logic. */
#define COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY (1 << 0)
#define COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT (1 << 1)
168
169 boolean_t
kdp_has_polled_corefile(void)170 kdp_has_polled_corefile(void)
171 {
172 return NULL != gIOPolledCoreFileVars;
173 }
174
/*
 * Returns the status of the most recent attempt to open the polled-mode
 * corefile (as recorded in gIOPolledCoreFileOpenRet).
 */
kern_return_t
kdp_polled_corefile_error(void)
{
	return gIOPolledCoreFileOpenRet;
}
180
/*
 * Returns the mode of the configured polled-mode corefile
 * (as recorded in gIOPolledCoreFileMode).
 */
IOPolledCoreFileMode_t
kdp_polled_corefile_mode(void)
{
	return gIOPolledCoreFileMode;
}
186
/*
 * Node in the singly-linked list of VA ranges that must be omitted from
 * kernel coredumps. Entries are page-aligned (enforced at insertion).
 */
struct kdp_core_excluded_region {
	struct kdp_core_excluded_region *next;
	vm_offset_t addr;
	vm_size_t size;
};

/* Mutex protecting mutation of the excluded-regions list below. */
static LCK_GRP_DECLARE(excluded_regions_grp, "kdp-exclude-regions");
static LCK_MTX_DECLARE(excluded_regions_mtx, &excluded_regions_grp);
static struct kdp_core_excluded_region *excluded_regions;
196
197 kern_return_t
kdp_core_exclude_region(vm_offset_t addr,vm_size_t size)198 kdp_core_exclude_region(vm_offset_t addr, vm_size_t size)
199 {
200 struct kdp_core_excluded_region *region;
201
202 if (addr >= addr + size) {
203 panic("%s: cannot exclude region starting at %p with size %zu (zero or overflowing size)",
204 __func__, (void*)addr, (size_t)size);
205 }
206 if (addr != round_page(addr) || size != round_page(size)) {
207 panic("%s: cannot exclude region starting at %p with size %zu (not page aligned)",
208 __func__, (void*)addr, (size_t)size);
209 }
210
211 region = kalloc_type(typeof(*region), Z_WAITOK | Z_NOFAIL);
212 region->addr = addr;
213 region->size = size;
214
215 lck_mtx_lock(&excluded_regions_mtx);
216 region->next = excluded_regions;
217 excluded_regions = region;
218 lck_mtx_unlock(&excluded_regions_mtx);
219
220 return KERN_SUCCESS;
221 }
222
223 kern_return_t
kdp_core_unexclude_region(vm_offset_t addr,vm_size_t size)224 kdp_core_unexclude_region(vm_offset_t addr, vm_size_t size)
225 {
226 struct kdp_core_excluded_region *region;
227 struct kdp_core_excluded_region **fixup = &excluded_regions;
228
229 lck_mtx_lock(&excluded_regions_mtx);
230 for (region = excluded_regions; region; region = region->next) {
231 if (region->addr == addr && region->size == size) {
232 *fixup = region->next;
233 break;
234 }
235 fixup = ®ion->next;
236 }
237 if (!region) {
238 panic("%s: cannot unexclude region starting at %p with size %zu (not currently excluded)",
239 __func__, (void*)addr, (size_t)size);
240 }
241 lck_mtx_unlock(&excluded_regions_mtx);
242
243 // We had exclusive access to the list when we removed the region, and it is no longer
244 // reachable from the list, so it is safe to free.
245 kfree_type(typeof(*region), region);
246
247 return KERN_SUCCESS;
248 }
249
250 static bool
kernel_vaddr_in_excluded_region(vm_offset_t addr,uint64_t * vincr)251 kernel_vaddr_in_excluded_region(vm_offset_t addr, uint64_t *vincr)
252 {
253 struct kdp_core_excluded_region *region;
254
255 // We check this earlier before attempting to dump the kernel, but verify here.
256 assert(!kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx));
257
258 for (region = excluded_regions; region; region = region->next) {
259 if (region->addr <= addr && addr < (region->addr + region->size)) {
260 *vincr = region->size;
261 return true;
262 }
263 }
264
265 return false;
266 }
267
/*
 * Primary data sink for coredump output: forwards `length` bytes at `data`
 * into the first output stage of the chain and logs progress.
 *
 * A NULL `data` with zero `length` is the end-of-stream marker ("100..").
 * The first failure is latched into vars->kcos_error; once latched, later
 * calls perform no work and return KERN_SUCCESS (the caller is expected to
 * consult kcos_error).
 */
kern_return_t
kdp_core_output(void *kdp_core_out_state, uint64_t length, void * data)
{
	kern_return_t err = KERN_SUCCESS;
	uint64_t percent;
	struct kdp_core_out_state *vars = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&vars->kcos_out_stage);

	if (vars->kcos_error == KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
		// panic testing: force the write to fail after X number of writes
		if ((panic_test_case & PANIC_TEST_CASE_COREFILE_IO_ERR) && (--panic_test_action_count == 0)) {
			panic_test_case &= ~PANIC_TEST_CASE_COREFILE_IO_ERR;
			length = -1;
		}
#endif

		/* Hand the chunk to the head of the output-stage chain. */
		if ((err = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, length, data)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(kdp_core_output) outproc(KDP_DATA, NULL, 0x%llx, %p) returned 0x%x\n",
			    length, data, err);
			vars->kcos_error = err;
		}
		if (!data && !length) {
			kern_coredump_log(NULL, "100..");
		} else {
			/* Emit a progress marker roughly every 10% of kcos_totalbytes. */
			vars->kcos_bytes_written += length;
			percent = (vars->kcos_bytes_written * 100) / vars->kcos_totalbytes;
			if ((percent - vars->kcos_lastpercent) >= 10) {
				vars->kcos_lastpercent = percent;
				kern_coredump_log(NULL, "%lld..\n", percent);
			}
		}
	}
	return err;
}
303
304 #if defined(__arm64__)
305 extern pmap_paddr_t avail_start, avail_end;
306 extern struct vm_object pmap_object_store;
307 #endif
308 extern vm_offset_t c_buffers;
309 extern vm_size_t c_buffers_size;
310
311 static bool
kernel_vaddr_in_coredump_stage(const struct kdp_output_stage * stage,uint64_t vaddr,uint64_t * vincr)312 kernel_vaddr_in_coredump_stage(const struct kdp_output_stage *stage, uint64_t vaddr, uint64_t *vincr)
313 {
314 uint64_t start_addr = (uint64_t)stage->kos_data;
315 uint64_t end_addr = start_addr + stage->kos_data_size;
316
317 if (!stage->kos_data) {
318 return false;
319 }
320
321 if (vaddr >= start_addr && vaddr < end_addr) {
322 *vincr = stage->kos_data_size - (vaddr - start_addr);
323 return true;
324 }
325
326 return false;
327 }
328
329 static bool
kernel_vaddr_in_coredump_stages(uint64_t vaddr,uint64_t * vincr)330 kernel_vaddr_in_coredump_stages(uint64_t vaddr, uint64_t *vincr)
331 {
332 if (kernel_vaddr_in_coredump_stage(&disk_output_stage, vaddr, vincr)) {
333 return true;
334 }
335
336 if (kernel_vaddr_in_coredump_stage(&lz4_output_stage, vaddr, vincr)) {
337 return true;
338 }
339
340 if (kernel_vaddr_in_coredump_stage(&zlib_output_stage, vaddr, vincr)) {
341 return true;
342 }
343
344 if (kernel_vaddr_in_coredump_stage(&buffer_output_stage, vaddr, vincr)) {
345 return true;
346 }
347
348 if (kernel_vaddr_in_coredump_stage(&net_output_stage, vaddr, vincr)) {
349 return true;
350 }
351
352 if (kernel_vaddr_in_coredump_stage(&progress_notify_output_stage, vaddr, vincr)) {
353 return true;
354 }
355
356 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
357 if (kernel_vaddr_in_coredump_stage(&aea_output_stage, vaddr, vincr)) {
358 return true;
359 }
360 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
361
362 #if defined(__arm64__)
363 if (kernel_vaddr_in_coredump_stage(&shmem_output_stage, vaddr, vincr)) {
364 return true;
365 }
366 #endif /* defined(__arm64__) */
367
368 #if defined(__arm64__)
369 if (kernel_vaddr_in_coredump_stage(&memory_backing_aware_buffer_output_stage, vaddr, vincr)) {
370 return true;
371 }
372 #endif /* defined(__arm64__) */
373
374 return false;
375 }
376
/*
 * Looks up the physical page backing kernel virtual address `vaddr`
 * (which must be page aligned).
 *
 * Returns the physical page number, or 0 if the address is unmapped or
 * deliberately excluded from coredumps. *pvincr is always set to the
 * page-rounded number of bytes the caller should advance by (one page for
 * normal lookups, or the size of a skipped range).
 *
 * If pvphysaddr is non-NULL, a faulting lookup is used and, when the page
 * lies inside the physmap, *pvphysaddr receives its kernel virtual alias;
 * pages outside the physmap are reported as unmapped (ppn = 0).
 */
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
	ppnum_t ppn = 0;
	uint64_t vincr = PAGE_SIZE_64;

	assert(!(vaddr & PAGE_MASK_64));

	/* VA ranges to exclude */
	if (vaddr == c_buffers) {
		/* compressor data */
		ppn = 0;
		vincr = c_buffers_size;
	} else if (kernel_vaddr_in_coredump_stages(vaddr, &vincr)) {
		/* coredump output stage working memory */
		ppn = 0;
	} else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
		/* RAMDisk root device backing memory */
		ppn = 0;
		vincr = kdp_core_ramdisk_size;
	} else
#if defined(__arm64__)
	if (vaddr == phystokv(avail_start)) {
		/* physical memory map */
		ppn = 0;
		vincr = (avail_end - avail_start);
	} else
#endif /* defined(__arm64__) */
	{
		/* Non-faulting lookup unless the caller wants the physmap alias. */
		ppn = (pvphysaddr != NULL ?
		    pmap_find_phys(kernel_pmap, vaddr) :
		    pmap_find_phys_nofault(kernel_pmap, vaddr));
	}

	*pvincr = round_page_64(vincr);

	if (ppn && pvphysaddr) {
		uint64_t phys = ptoa_64(ppn);
		/* Only pages inside the physmap have a usable kernel alias. */
		if (physmap_enclosed(phys)) {
			*pvphysaddr = phystokv(phys);
		} else {
			ppn = 0;
		}
	}

	return ppn;
}
423
/*
 * Walks the kernel virtual address range [start, end) and invokes `callback`
 * once per maximal run of contiguous, dumpable, present mappings.
 *
 * Exclusions (debug buffer handling, excluded regions, output-stage buffers,
 * etc.) end the current run; the callback receives (run_start, run_end,
 * context) and a non-KERN_SUCCESS return aborts the traversal.
 *
 * On arm64, pagetable pages reachable only through the physmap are pulled
 * out of pmap_object_store and visited individually. On x86_64, absent
 * 2MB-aligned PDEs allow skipping a whole large page at a time. With KASAN,
 * shadow mappings are traversed at the end.
 *
 * Assumes the pmap is locked, or that we are in the kernel debugger.
 */
static int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
	IOReturn ret;
	vm_map_offset_t vcurstart, vcur;
	uint64_t vincr = 0;
	vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
	vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
	vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
	vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

	/* Tracks whether vcurstart..vcur is an open run of dumpable pages. */
	boolean_t lastvavalid;
#if defined(__arm64__)
	vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
	assert(!is_ept_pmap(pmap));
#endif

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return KERN_INVALID_ARGUMENT;
	}

	ret = KERN_SUCCESS;
	lastvavalid = FALSE;
	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
		ppnum_t ppn = 0;

#if defined(__arm64__)
		/* We're at the start of the physmap, so pull out the pagetable pages that
		 * are accessed through that region.*/
		if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
			m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
		}

		if (m != VM_PAGE_NULL) {
			vm_map_offset_t vprev = vcur;
			ppn = (ppnum_t)atop(avail_end);
			while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
				/* Ignore pages that come from the static region and have already been dumped.*/
				if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
					ppn = VM_PAGE_GET_PHYS_PAGE(m);
					break;
				}
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
			}
			vincr = PAGE_SIZE_64;
			if (ppn == atop(avail_end)) {
				/* Exhausted the pmap object's pages; release and resume past the physmap. */
				vm_object_unlock(&pmap_object_store);
				m = VM_PAGE_NULL;
				// avail_end is not a valid physical address,
				// so phystokv(avail_end) may not produce the expected result.
				vcur = phystokv(avail_start) + (avail_end - avail_start);
			} else {
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
				vcur = phystokv(ptoa(ppn));
			}
			if (vcur != vprev) {
				/* The jump broke contiguity: flush the pending run. */
				ret = callback(vcurstart, vprev, context);
				lastvavalid = FALSE;
			}
		}
		if (m == VM_PAGE_NULL) {
			ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
		}
#else /* defined(__arm64__) */
		ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
		if (ppn != 0 && kernel_vaddr_in_excluded_region(vcur, &vincr)) {
			/* excluded region */
			ppn = 0;
		}
		if (ppn != 0) {
			if (((vcur < debug_start) || (vcur >= debug_end))
			    && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
			    // include the macOS panic region if it's mapped
			    && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
			    ) {
				/* not something we want */
				ppn = 0;
			}
			/* include the phys carveout only if explictly marked */
			if (debug_is_in_phys_carveout(vcur) &&
			    !debug_can_coredump_phys_carveout()) {
				ppn = 0;
			}
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

#if defined(__x86_64__)
			/* Try to skip by 2MB if possible */
			if ((vcur & PDMASK) == 0) {
				pd_entry_t *pde;
				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vincr = NBPD;
					}
				}
			}
#endif /* defined(__x86_64__) */
		}
		vcur += vincr;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}

#if KASAN
	if (ret == KERN_SUCCESS) {
		ret = kasan_traverse_mappings(callback, context);
	}
#endif

	return ret;
}
566
/*
 * Accumulator for the preflight traversal: counts the contiguous regions
 * that will be dumped and their total size in bytes.
 */
struct kern_dump_preflight_context {
	uint32_t region_count;
	uint64_t dumpable_bytes;
};
571
572 int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)573 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
574 vm_map_offset_t end,
575 void *context)
576 {
577 struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
578 IOReturn ret = KERN_SUCCESS;
579
580 kdc->region_count++;
581 kdc->dumpable_bytes += (end - start);
582
583 return ret;
584 }
585
586
/*
 * Carries the coredump layer's segment-description callback (and its opaque
 * context) through the pmap traversal.
 */
struct kern_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};
591
592 int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)593 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
594 vm_map_offset_t end,
595 void *context)
596 {
597 struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
598 uint64_t seg_start = (uint64_t) start;
599 uint64_t seg_end = (uint64_t) end;
600
601 return kds_context->callback(seg_start, seg_end, kds_context->context);
602 }
603
/*
 * Carries the coredump layer's segment-data callback (and its opaque
 * context) through the pmap traversal.
 */
struct kern_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};
608
609 int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)610 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
611 vm_map_offset_t end,
612 void *context)
613 {
614 struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;
615
616 return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
617 }
618
/*
 * Pre-dump safety check for the kernel coredump: if the excluded-regions
 * list is locked (the panicked thread may have been mutating it), we cannot
 * safely walk it, so the kernel dump is skipped. Records the skip reason in
 * the panic header and returns KERN_NODE_DOWN; otherwise KERN_SUCCESS.
 */
static kern_return_t
kern_dump_init(__unused void *refcon, void *context)
{
	/* TODO: consider doing mmu flush from an init function */

	// If excluded regions list is locked, it is unsafe to dump the kernel.
	if (kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx)) {
		kern_coredump_log(context, "%s: skipping kernel because excluded regions list is locked\n",
		    __func__);
#if defined(__arm64__)
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#else
		panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#endif
		paniclog_flush();
		return KERN_NODE_DOWN;
	}

	return KERN_SUCCESS;
}
639
640 static int
kern_dump_save_summary(__unused void * refcon,core_save_summary_cb callback,void * context)641 kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
642 {
643 struct kern_dump_preflight_context kdc_preflight = { };
644 uint64_t thread_state_size = 0, thread_count = 0;
645 vm_map_offset_t vstart = kdp_core_start_addr();
646 kern_return_t ret;
647
648 ret = pmap_traverse_present_mappings(kernel_pmap,
649 vstart,
650 VM_MAX_KERNEL_ADDRESS,
651 kern_dump_pmap_traverse_preflight_callback,
652 &kdc_preflight);
653 if (ret != KERN_SUCCESS) {
654 kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
655 return ret;
656 }
657
658 kern_collectth_state_size(&thread_count, &thread_state_size);
659
660 ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
661 thread_count, thread_state_size, 0, context);
662 return ret;
663 }
664
665 static int
kern_dump_save_seg_descriptions(__unused void * refcon,core_save_segment_descriptions_cb callback,void * context)666 kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
667 {
668 vm_map_offset_t vstart = kdp_core_start_addr();
669 kern_return_t ret;
670 struct kern_dump_send_seg_desc_context kds_context;
671
672 kds_context.callback = callback;
673 kds_context.context = context;
674
675 ret = pmap_traverse_present_mappings(kernel_pmap,
676 vstart,
677 VM_MAX_KERNEL_ADDRESS,
678 kern_dump_pmap_traverse_send_segdesc_callback,
679 &kds_context);
680 if (ret != KERN_SUCCESS) {
681 kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
682 return ret;
683 }
684
685 return KERN_SUCCESS;
686 }
687
688 static int
kern_dump_save_thread_state(__unused void * refcon,void * buf,core_save_thread_state_cb callback,void * context)689 kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
690 {
691 kern_return_t ret;
692 uint64_t thread_state_size = 0, thread_count = 0;
693
694 kern_collectth_state_size(&thread_count, &thread_state_size);
695
696 if (thread_state_size > 0) {
697 void * iter = NULL;
698 do {
699 kern_collectth_state(current_thread(), buf, thread_state_size, &iter);
700
701 ret = callback(buf, context);
702 if (ret != KERN_SUCCESS) {
703 return ret;
704 }
705 } while (iter);
706 }
707
708 return KERN_SUCCESS;
709 }
710
711
/*
 * Reports the kernel's software-version detail to the coredump layer:
 * the kernel text base address and the kernel UUID (third argument 0).
 */
static int
kern_dump_save_sw_vers_detail(__unused void *refcon, core_save_sw_vers_detail_cb callback, void *context)
{
	return callback(vm_kernel_stext, kernel_uuid, 0, context);
}
717
718 static int
kern_dump_save_segment_data(__unused void * refcon,core_save_segment_data_cb callback,void * context)719 kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
720 {
721 vm_map_offset_t vstart = kdp_core_start_addr();
722 kern_return_t ret;
723 struct kern_dump_send_segdata_context kds_context;
724
725 kds_context.callback = callback;
726 kds_context.context = context;
727
728 ret = pmap_traverse_present_mappings(kernel_pmap,
729 vstart,
730 VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
731 if (ret != KERN_SUCCESS) {
732 kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
733 return ret;
734 }
735
736 return KERN_SUCCESS;
737 }
738
/*
 * Prepares the shared output state for a new core within the same dump:
 * resets byte/progress counters, resets every output stage, and decides
 * whether this core can proceed given its encryption requirements.
 *
 * totalbytes: expected size of the upcoming core (for progress reporting).
 * encrypt_core: whether this core is supposed to be encrypted.
 * out_should_skip_coredump: set true when encryption is required and
 * enforced but no encryption stage is available; the skip is recorded in
 * the panic header. When encryption is not requested but an encryption
 * stage exists, that stage is bypassed instead.
 *
 * Always returns KERN_SUCCESS.
 */
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_state, uint64_t totalbytes, bool encrypt_core, bool *out_should_skip_coredump)
{
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *current_stage = NULL;

	/* Re-initialize kdp_outstate */
	outstate->kcos_totalbytes = totalbytes;
	outstate->kcos_bytes_written = 0;
	outstate->kcos_lastpercent = 0;
	outstate->kcos_error = KERN_SUCCESS;

	/* Reset the output stages */
	STAILQ_FOREACH(current_stage, &outstate->kcos_out_stage, kos_next) {
		current_stage->kos_funcs.kosf_reset(current_stage);
	}

	*out_should_skip_coredump = false;
	if (encrypt_core) {
		/* Encryption wanted: skip the core if it's enforced but unavailable. */
		if (outstate->kcos_enforce_encryption && !outstate->kcos_encryption_stage) {
			*out_should_skip_coredump = true;
#if defined(__arm64__)
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#else
			panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#endif
			kern_coredump_log(NULL, "(kdp_reset_output_vars) Encryption requested, is unavailable, and enforcement is active. Skipping current core.\n");
		}
	} else if (outstate->kcos_encryption_stage) {
		/* Unencrypted core: pass data through the encryption stage untouched. */
		outstate->kcos_encryption_stage->kos_bypass = true;
	}

	return KERN_SUCCESS;
}
773
/*
 * Rewrites the corefile header at the beginning of the file: seeks the first
 * output stage to offset 0, writes the full header (fixed header plus all
 * per-core details slots), and flushes. On arm64 an explicit KDP_FLUSH is
 * also issued. Returns the first failing stage status, or KERN_SUCCESS.
 */
static kern_return_t
kern_dump_update_header(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	kern_return_t ret;

	/* Write the file header -- first seek to the beginning of the file */
	foffset = 0;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	/* Write out the whole in-memory header image. */
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header_size, kdp_core_header)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
		    kdp_core_header_size, kdp_core_header, ret);
		return ret;
	}

	/* Zero-length write acts as a data flush. */
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
809
810 kern_return_t
kern_dump_record_file(void * kdp_core_out_state,const char * filename,uint64_t file_offset,uint64_t * out_file_length,uint64_t details_flags)811 kern_dump_record_file(void *kdp_core_out_state, const char *filename, uint64_t file_offset, uint64_t *out_file_length, uint64_t details_flags)
812 {
813 kern_return_t ret = KERN_SUCCESS;
814 uint64_t bytes_written = 0;
815 struct mach_core_details_v2 *core_details = NULL;
816 struct kdp_output_stage *last_stage;
817 struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
818
819 assert(kdp_core_header->num_files < KERN_COREDUMP_MAX_CORES);
820 assert(out_file_length != NULL);
821 *out_file_length = 0;
822
823 last_stage = STAILQ_LAST(&outstate->kcos_out_stage, kdp_output_stage, kos_next);
824 bytes_written = last_stage->kos_bytes_written;
825
826 core_details = &(kdp_core_header->files[kdp_core_header->num_files]);
827 core_details->flags = details_flags;
828 core_details->offset = file_offset;
829 core_details->length = bytes_written;
830 strncpy((char *)&core_details->core_name, filename,
831 MACH_CORE_FILEHEADER_NAMELEN);
832 core_details->core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
833
834 kdp_core_header->num_files++;
835
836 ret = kern_dump_update_header(outstate);
837 if (ret == KERN_SUCCESS) {
838 *out_file_length = bytes_written;
839 }
840
841 return ret;
842 }
843
844 kern_return_t
kern_dump_seek_to_next_file(void * kdp_core_out_state,uint64_t next_file_offset)845 kern_dump_seek_to_next_file(void *kdp_core_out_state, uint64_t next_file_offset)
846 {
847 struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
848 struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
849 kern_return_t ret;
850
851 if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != KERN_SUCCESS) {
852 kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
853 sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
854 }
855
856 return ret;
857 }
858
859 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
860
/*
 * Persist the installed coredump public key into the corefile's reserved
 * public-key region. The key is written twice -- once for the "current" slot
 * and once for the "next" slot -- with each slot zero-padded out to
 * PUBLIC_KEY_RESERVED_LENGTH. No-op when no key has been installed.
 *
 * Returns KERN_SUCCESS, or the first failing outproc status.
 */
static kern_return_t
kern_dump_write_public_key(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	/*
	 * Zero padding needed to fill the remainder of a key slot.
	 * NOTE(review): assumes pub_key_length <= PUBLIC_KEY_RESERVED_LENGTH
	 * (otherwise this unsigned subtraction wraps) -- confirm at key install.
	 */
	uint64_t remainder = PUBLIC_KEY_RESERVED_LENGTH - kdp_core_header->pub_key_length;
	kern_return_t ret;

	if (kdp_core_header->pub_key_offset == 0 || kdp_core_header->pub_key_length == 0) {
		// Nothing to do
		return KERN_SUCCESS;
	}

	/* Write the public key -- first seek to the appropriate offset */
	foffset = kdp_core_header->pub_key_offset;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	// Write the public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	// Fill out the remainder of the block with zeroes (NULL data => zero fill)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	// Do it once more to write the "next" public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	// Zero-pad the "next" key slot as well
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	// Zero-length write acts as a data flush
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
923
924 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
925
926 static kern_return_t
chain_output_stages(enum kern_dump_type kd_variant,struct kdp_core_out_state * outstate,uint64_t * details_flags)927 chain_output_stages(enum kern_dump_type kd_variant, struct kdp_core_out_state *outstate, uint64_t *details_flags)
928 {
929 struct kdp_output_stage *current = NULL;
930
931 assert(details_flags);
932 *details_flags = 0;
933
934 switch (kd_variant) {
935 case KERN_DUMP_STACKSHOT_DISK:
936 OS_FALLTHROUGH;
937 case KERN_DUMP_DISK:
938 #if defined(__arm64__)
939 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &memory_backing_aware_buffer_output_stage, kos_next);
940 #endif
941 if (!kdp_corezip_disabled) {
942 if (kdp_core_is_initializing_lz4_stage) {
943 kern_coredump_log(NULL, "We were in the middle of initializing LZ4 stage. Cannot write a coredump to disk\n");
944 return KERN_FAILURE;
945 } else if (!lz4_output_stage.kos_initialized) {
946 kern_coredump_log(NULL, "LZ4 stage is not yet initialized. Cannot write a coredump to disk\n");
947 return KERN_FAILURE;
948 }
949 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &lz4_output_stage, kos_next);
950 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_LZ4;
951 }
952 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
953 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
954 if (kdp_core_is_initializing_encryption_stage) {
955 kern_coredump_log(NULL, "We were in the middle of initializing encryption. Marking it as unavailable\n");
956 } else if (aea_output_stage.kos_initialized) {
957 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &aea_output_stage, kos_next);
958 outstate->kcos_encryption_stage = &aea_output_stage;
959 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_ENCRYPTED_AEA;
960 }
961 outstate->kcos_enforce_encryption = kern_dump_should_enforce_encryption();
962 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
963 if (kdp_core_is_initializing_disk_stage) {
964 kern_coredump_log(NULL, "We were in the middle of initializing the disk stage. Cannot write a coredump to disk\n");
965 return KERN_FAILURE;
966 } else if (disk_output_stage.kos_initialized == false) {
967 kern_coredump_log(NULL, "Corefile is not yet initialized. Cannot write a coredump to disk\n");
968 return KERN_FAILURE;
969 }
970 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &disk_output_stage, kos_next);
971 break;
972 case KERN_DUMP_NET:
973 if (!kdp_corezip_disabled) {
974 if (!zlib_output_stage.kos_initialized) {
975 kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to the network\n");
976 return KERN_FAILURE;
977 }
978 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
979 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
980 }
981 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
982 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &buffer_output_stage, kos_next);
983 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &net_output_stage, kos_next);
984 break;
985 #if defined(__arm64__)
986 case KERN_DUMP_HW_SHMEM_DBG:
987 if (!kdp_corezip_disabled) {
988 if (!zlib_output_stage.kos_initialized) {
989 kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to shared memory\n");
990 return KERN_FAILURE;
991 }
992 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
993 *details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
994 }
995 STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &shmem_output_stage, kos_next);
996 break;
997 #endif /* defined(__arm64__) */
998 }
999
1000 STAILQ_FOREACH(current, &outstate->kcos_out_stage, kos_next) {
1001 current->kos_outstate = outstate;
1002 }
1003
1004 return KERN_SUCCESS;
1005 }
1006
1007 #if defined(__arm64__)
1008 static kern_return_t
dump_panic_buffer(struct kdp_core_out_state * outstate,char * panic_buf,size_t panic_len,uint64_t * foffset,uint64_t details_flags)1009 dump_panic_buffer(struct kdp_core_out_state *outstate, char *panic_buf, size_t panic_len,
1010 uint64_t *foffset, uint64_t details_flags)
1011 {
1012 kern_return_t ret = KERN_SUCCESS;
1013 bool should_skip = false;
1014
1015 kern_coredump_log(NULL, "\nBeginning dump of panic region of size 0x%zx\n", panic_len);
1016
1017 ret = kdp_reset_output_vars(outstate, panic_len, true, &should_skip);
1018 if (KERN_SUCCESS != ret) {
1019 return ret;
1020 }
1021
1022 if (should_skip) {
1023 kern_coredump_log(NULL, "Skipping panic region dump\n");
1024 return ret;
1025 }
1026
1027 uint64_t compressed_panic_region_len = 0;
1028 ret = kdp_core_output(outstate, panic_len, panic_buf);
1029 if (KERN_SUCCESS != ret) {
1030 kern_coredump_log(NULL, "Failed to write panic region to file, kdp_coreoutput(outstate, %zu, %p) returned 0x%x\n",
1031 panic_len, panic_buf, ret);
1032 return ret;
1033 }
1034
1035 ret = kdp_core_output(outstate, 0, NULL);
1036 if (KERN_SUCCESS != ret) {
1037 kern_coredump_log(NULL, "Failed to flush panic region data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", outstate, ret);
1038 return ret;
1039 }
1040
1041 ret = kern_dump_record_file(outstate, "panic_region", *foffset, &compressed_panic_region_len,
1042 details_flags);
1043 if (KERN_SUCCESS != ret) {
1044 kern_coredump_log(NULL, "Failed to record panic region in corefile header, kern_dump_record_file returned 0x%x\n", ret);
1045 return ret;
1046 }
1047
1048 kern_coredump_log(NULL, "Recorded panic region in corefile at offset 0x%llx, compressed to %llu bytes\n", *foffset, compressed_panic_region_len);
1049 *foffset = roundup((*foffset + compressed_panic_region_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1050
1051 ret = kern_dump_seek_to_next_file(outstate, *foffset);
1052 if (KERN_SUCCESS != ret) {
1053 kern_coredump_log(NULL, "Failed to seek to panic region file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", *foffset, ret);
1054 return ret;
1055 }
1056
1057 return ret;
1058 }
1059 #endif /* defined(__arm64__) */
1060
/*
 * Core coredump driver. Chains the output stages for the requested variant,
 * opens the output (disk variants), reserves header/key/log space, dumps the
 * panic region (arm64 disk) or panic stackshot (x86_64 disk), streams the
 * coredump(s) through kern_do_coredump(), and for disk dumps seeks back to
 * write the debug logs and final corefile header.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
do_kern_dump(enum kern_dump_type kd_variant)
{
	struct kdp_core_out_state outstate = { };
	struct kdp_output_stage *first_stage = NULL;
	char *coredump_log_start = NULL, *buf = NULL;
	size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
	uint64_t foffset = 0;
	kern_return_t ret = KERN_SUCCESS;
	boolean_t output_opened = FALSE, dump_succeeded = TRUE;
	uint64_t details_flags = 0;

	/* Initialize output context */

	bzero(&outstate, sizeof(outstate));
	STAILQ_INIT(&outstate.kcos_out_stage);
	ret = chain_output_stages(kd_variant, &outstate, &details_flags);
	if (KERN_SUCCESS != ret) {
		/* first_stage stays NULL; output_opened is FALSE so exit skips outproc */
		dump_succeeded = FALSE;
		goto exit;
	}
	first_stage = STAILQ_FIRST(&outstate.kcos_out_stage);

	/*
	 * Record the initial panic log buffer length so we can dump the coredump log
	 * and panic log to disk
	 */
	coredump_log_start = debug_buf_ptr;
#if defined(__arm64__)
	assert(panic_info->eph_other_log_offset != 0);
	assert(panic_info->eph_panic_log_len != 0);
	/* Include any data from before the panic log as well */
	prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
	    panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm64__) */
	if (panic_info->mph_panic_log_offset != 0) {
		prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
		    panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
	}
#endif /* defined(__arm64__) */

	assert(prior_debug_logsize <= debug_buf_size);

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		/* Open the file for output */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_WRQ, NULL, 0, NULL)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}
	/* Non-disk variants need no explicit open but still get a KDP_EOF on exit */
	output_opened = true;

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		const size_t aligned_corefile_header_size = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Two reserved slots: the "current" and "next" public keys */
		const size_t aligned_public_key_size = PUBLIC_KEY_RESERVED_LENGTH * 2;
#else
		const size_t aligned_public_key_size = 0;
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;

		/* Space for file header, public key, panic log, core log */
		foffset = roundup(aligned_corefile_header_size + aligned_public_key_size + reserved_debug_logsize, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		kdp_core_header->log_offset = aligned_corefile_header_size + aligned_public_key_size;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Write the public key */
		ret = kern_dump_write_public_key(&outstate);
		if (KERN_SUCCESS != ret) {
			kern_coredump_log(NULL, "(do_kern_dump write public key) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		/* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}

#if defined(__arm64__)
	flush_mmu_tlb();
#endif

	kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores...\n" :
	    "Transmitting kernel state, please wait:\n");

#if defined (__arm64__)
	/* Everything from gPanicBase up to the current debug pointer is the panic region */
	char *panic_buf = (char *)gPanicBase;
	size_t panic_len = (vm_offset_t)debug_buf_ptr - gPanicBase;
	if (kd_variant == KERN_DUMP_DISK && (panic_buf && panic_len)) {
		/* Panic-region dump failure is recorded but does not abort the coredump */
		ret = dump_panic_buffer(&outstate, panic_buf, panic_len, &foffset, details_flags);
		if (KERN_SUCCESS != ret) {
			dump_succeeded = FALSE;
		}
	}
#endif

#if defined(__x86_64__)
	if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
		bool should_skip = false;

		kern_coredump_log(NULL, "\nBeginning dump of kernel stackshot\n");

		ret = kdp_reset_output_vars(&outstate, panic_stackshot_len, true, &should_skip);

		if (ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to reset outstate for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
			dump_succeeded = FALSE;
		} else if (!should_skip) {
			uint64_t compressed_stackshot_len = 0;
			/* write -> flush -> record -> advance/seek, bailing at the first failure */
			if ((ret = kdp_core_output(&outstate, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outstate, %lu, %p) returned 0x%x\n",
				    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_core_output(&outstate, 0, NULL)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outstate, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kern_dump_record_file(&outstate, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len, details_flags)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
				dump_succeeded = FALSE;
			} else {
				kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
				foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
				if ((ret = kern_dump_seek_to_next_file(&outstate, foffset)) != KERN_SUCCESS) {
					kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
					dump_succeeded = FALSE;
				}
			}
		} else {
			kern_coredump_log(NULL, "Skipping stackshot dump\n");
		}
	}
#endif

	if (kd_variant == KERN_DUMP_DISK) {
		/*
		 * Dump co-processors as well, foffset will be overwritten with the
		 * offset of the next location in the file to be written to.
		 */
		if (kern_do_coredump(&outstate, FALSE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	} else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
		/* Only the kernel */
		if (kern_do_coredump(&outstate, TRUE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	}

	if (kd_variant == KERN_DUMP_DISK) {
		assert(reserved_debug_logsize != 0);
		size_t remaining_debug_logspace = reserved_debug_logsize;

		/* Write the debug log -- first seek to the end of the corefile header */
		foffset = kdp_core_header->log_offset;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/* First flush the data from just the paniclog */
		size_t initial_log_length = 0;
#if defined(__arm64__)
		initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
		    panic_info->eph_panic_log_len;
#else
		if (panic_info->mph_panic_log_offset != 0) {
			initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
			    panic_info->mph_panic_log_len;
		}
#endif

		buf = debug_buf_base;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, initial_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    initial_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		remaining_debug_logspace -= initial_log_length;

		/* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm64__)
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
		/*
		 * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86 if debugger_is_panic was
		 * configured to FALSE based on the boot-args. In that case just start from where the debug buffer was when
		 * we began taking a coredump.
		 */
		if (panic_info->mph_other_log_offset != 0) {
			buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
		} else {
			buf = coredump_log_start;
		}
#endif
		assert(debug_buf_ptr >= buf);

		/* Truncate the 'other' log to the space we reserved up front */
		size_t other_log_length = debug_buf_ptr - buf;
		if (other_log_length > remaining_debug_logspace) {
			other_log_length = remaining_debug_logspace;
		}

		/* Write the coredump log */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, other_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    other_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		kdp_core_header->log_length = initial_log_length + other_log_length;
		kern_dump_update_header(&outstate);
	}

exit:
	/* close / last packet */
	if (output_opened && (ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_EOF, NULL, 0, ((void *) 0))) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
		dump_succeeded = FALSE;
	}

	/* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm64__)
	panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
	    EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
	paniclog_flush();
#else
	if (panic_info->mph_panic_log_offset != 0) {
		panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
		    MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
		paniclog_flush();
	}
#endif

	return dump_succeeded ? 0 : -1;
}
1308
/*
 * Report whether a kernel coredump has been successfully written
 * (latched by kern_dump()).
 */
boolean_t
dumped_kernel_core(void)
{
	return kern_dump_successful;
}
1314
/*
 * Public entry point for taking a kernel coredump of the given variant.
 *
 * Disk-backed variants are taken at most once: success latches
 * 'dumped_local' and later calls return 0 immediately. A disk dump that
 * fails leaves 'local_dump_in_progress' set, so subsequent disk-dump
 * requests return -1 instead of retrying -- NOTE(review): appears
 * intentional (avoid re-entering a half-written corefile); confirm.
 *
 * Returns 0 on success, -1 on failure.
 */
int
kern_dump(enum kern_dump_type kd_variant)
{
	static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
	int ret = -1;
#if KASAN
	/* KASAN instrumentation must not fire while the debugger owns the machine */
	kasan_kdp_disable();
#endif
	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		if (dumped_local) {
			return 0;
		}
		if (local_dump_in_progress) {
			return -1;
		}
		local_dump_in_progress = TRUE;
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			dumped_local = TRUE;
			kern_dump_successful = TRUE;
			local_dump_in_progress = FALSE;
		}

		return ret;
#if defined(__arm64__)
	} else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
#endif
	} else {
		/* Network (and any other non-disk) dumps may be retried freely */
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
	}
}
1355
/*
 * One-time setup of all coredump output stages, called at kdp_core_init
 * time. Stages that depend on late-arriving components (the corefile on
 * disk, the LZ4 kext, AEA encryption) are only zeroed here and initialized
 * later through their availability callbacks. Returns the first
 * initialization failure, if any.
 */
static kern_return_t
kdp_core_init_output_stages(void)
{
	kern_return_t ret = KERN_SUCCESS;

	// We only zero-out the disk stage. It will be initialized
	// later on when the corefile is initialized
	bzero(&disk_output_stage, sizeof(disk_output_stage));

	// We only zero-out the LZ4 stage. It will be initialized
	// later on when the kext is loaded.
	bzero(&lz4_output_stage, sizeof(lz4_output_stage));
	lz4_stage_monitor_availability();

	// We only initialize the zlib output stage if we can reach the debugger.
	// This saves us from wasting some wired memory that will never be used
	// in other configurations.
	bzero(&zlib_output_stage, sizeof(zlib_output_stage));
	if (debug_boot_arg && (debug_boot_arg & DB_REBOOT_ALWAYS) == 0) {
		ret = zlib_stage_initialize(&zlib_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}

	bzero(&buffer_output_stage, sizeof(buffer_output_stage));
	ret = buffer_stage_initialize(&buffer_output_stage, kdp_crashdump_pkt_size);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&net_output_stage, sizeof(net_output_stage));
	ret = net_stage_initialize(&net_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&progress_notify_output_stage, sizeof(progress_notify_output_stage));
	ret = progress_notify_stage_initialize(&progress_notify_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	// We only zero-out the AEA stage. It will be initialized
	// later on, if it's supported and needed
	bzero(&aea_output_stage, sizeof(aea_output_stage));
	aea_stage_monitor_availability();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm64__)
	// Shared-memory debug stage only when consistent debug + debugger access
	bzero(&shmem_output_stage, sizeof(shmem_output_stage));
	if (PE_consistent_debug_enabled() && PE_i_can_has_debugger(NULL)) {
		ret = shmem_stage_initialize(&shmem_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}
#endif /* defined(__arm64__) */

#if defined(__arm64__)
	bzero(&memory_backing_aware_buffer_output_stage, sizeof(memory_backing_aware_buffer_output_stage));
	ret = memory_backing_aware_buffer_stage_initialize(&memory_backing_aware_buffer_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
1426
1427 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1428
1429 static bool
kern_dump_should_enforce_encryption(void)1430 kern_dump_should_enforce_encryption(void)
1431 {
1432 static int enforce_encryption = -1;
1433
1434 // Only check once
1435 if (enforce_encryption == -1) {
1436 uint32_t coredump_encryption_flags = 0;
1437
1438 // When set, the boot-arg is the sole decider
1439 if (!kernel_debugging_restricted() &&
1440 PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags))) {
1441 enforce_encryption = (coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT) != 0 ? 1 : 0;
1442 } else {
1443 enforce_encryption = 0;
1444 }
1445 }
1446
1447 return enforce_encryption != 0;
1448 }
1449
1450 static bool
kern_dump_is_encryption_available(void)1451 kern_dump_is_encryption_available(void)
1452 {
1453 // Default to feature enabled unless boot-arg says otherwise
1454 uint32_t coredump_encryption_flags = COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY;
1455
1456 if (!kernel_debugging_restricted()) {
1457 PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags));
1458 }
1459
1460 if ((coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY) == 0) {
1461 return false;
1462 }
1463
1464 return aea_stage_is_available();
1465 }
1466
1467 /*
1468 * Initialize (or de-initialize) the encryption stage. This is done in a way such that if initializing the
1469 * encryption stage with a new key fails, then the existing encryption stage is left untouched. Once
1470 * the new stage is initialized, the old stage is uninitialized.
1471 *
1472 * This function is called whenever we have a new public key (whether from someone calling our sysctl, or because
1473 * we read it out of a corefile), or when encryption becomes available.
1474 *
1475 * Parameters:
1476 * - public_key: The public key to use when initializing the encryption stage. Can be NULL to indicate that
1477 * the encryption stage should be de-initialized.
1478 * - public_key_size: The size of the given public key.
1479 */
1480 static kern_return_t
kdp_core_init_encryption_stage(void * public_key,size_t public_key_size)1481 kdp_core_init_encryption_stage(void *public_key, size_t public_key_size)
1482 {
1483 kern_return_t ret = KERN_SUCCESS;
1484 struct kdp_output_stage new_encryption_stage = {};
1485 struct kdp_output_stage old_encryption_stage = {};
1486
1487 lck_mtx_assert(kdp_core_encryption_stage_lock, LCK_MTX_ASSERT_OWNED);
1488
1489 bzero(&new_encryption_stage, sizeof(new_encryption_stage));
1490
1491 if (public_key && kern_dump_is_encryption_available()) {
1492 ret = aea_stage_initialize(&new_encryption_stage, public_key, public_key_size);
1493 if (KERN_SUCCESS != ret) {
1494 printf("(kdp_core_init_encryption_stage) Failed to initialize the encryption stage. Error 0x%x\n", ret);
1495 return ret;
1496 }
1497 }
1498
1499 bcopy(&aea_output_stage, &old_encryption_stage, sizeof(aea_output_stage));
1500
1501 bcopy(&new_encryption_stage, &aea_output_stage, sizeof(new_encryption_stage));
1502
1503 if (old_encryption_stage.kos_initialized && old_encryption_stage.kos_funcs.kosf_free) {
1504 old_encryption_stage.kos_funcs.kosf_free(&old_encryption_stage);
1505 }
1506
1507 return KERN_SUCCESS;
1508 }
1509
/*
 * Handle installation of a new coredump encryption public key (or key
 * removal when the descriptor's key is NULL).
 *
 * Re-initializes the encryption stage with the new key, updates the global
 * key/header state, hands the previous key back to the caller through the
 * descriptor (caller frees it), and persists the new key into the
 * corefile's reserved "next key" region via the access_data callback.
 * Runs under the encryption-stage lock, with the 'initializing' flag set so
 * a concurrent dump attempt refuses to use a half-built stage.
 *
 * Parameters:
 *  - access_data / access_context: callback (and its context) for reading
 *    and writing the on-disk corefile.
 *  - recipient_context: a struct kdp_core_encryption_key_descriptor with
 *    the new key; on return it carries the old key for the caller to free.
 *
 * Returns KERN_SUCCESS, kIOReturnBadArgument for a NULL descriptor, or the
 * first failing step's status.
 */
kern_return_t
kdp_core_handle_new_encryption_key(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context)
{
	kern_return_t ret = KERN_SUCCESS;
	struct kdp_core_encryption_key_descriptor *key_descriptor = (struct kdp_core_encryption_key_descriptor *) recipient_context;
	void *old_public_key = NULL;
	size_t old_public_key_size = 0;

	if (!key_descriptor) {
		return kIOReturnBadArgument;
	}

	lck_mtx_lock(kdp_core_encryption_stage_lock);
	kdp_core_is_initializing_encryption_stage = true;

	do {
		// Do the risky part first, and bail out cleanly if it fails
		ret = kdp_core_init_encryption_stage(key_descriptor->kcekd_key, key_descriptor->kcekd_size);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to re-initialize encryption stage. Error 0x%x\n", ret);
			break;
		}

		// The rest of this function should technically never fail

		old_public_key = kdp_core_public_key;
		old_public_key_size = kdp_core_header->pub_key_length;

		// Swap the in-memory key and refresh the header's key-format flags
		kdp_core_public_key = key_descriptor->kcekd_key;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_EXISTING_COREFILE_KEY_FORMAT_MASK;
		if (key_descriptor->kcekd_key) {
			kdp_core_header->flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(key_descriptor->kcekd_format);
			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
			kdp_core_header->pub_key_length = key_descriptor->kcekd_size;
		} else {
			kdp_core_header->pub_key_offset = 0;
			kdp_core_header->pub_key_length = 0;
		}

		/*
		 * Return the old key to the caller to free
		 */
		key_descriptor->kcekd_key = old_public_key;
		key_descriptor->kcekd_size = (uint16_t)old_public_key_size;

		// If this stuff fails, we have bigger problems
		struct mach_core_fileheader_v2 existing_header;
		bool used_existing_header = false;
		ret = access_data(access_context, FALSE, 0, sizeof(existing_header), &existing_header);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to read the existing corefile header. Error 0x%x\n", ret);
			break;
		}

		// Prefer updating a valid on-disk v2 header in place (when key sizes agree)
		if (existing_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
		    && existing_header.version == 2
		    && (existing_header.pub_key_length == 0
		    || kdp_core_header->pub_key_length == 0
		    || existing_header.pub_key_length == kdp_core_header->pub_key_length)) {
			used_existing_header = true;
			existing_header.flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

			if (kdp_core_public_key) {
				existing_header.flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

				if (existing_header.pub_key_offset == 0) {
					existing_header.pub_key_offset = kdp_core_header->pub_key_offset;
					existing_header.pub_key_length = kdp_core_header->pub_key_length;
				}
			}

			ret = access_data(access_context, TRUE, 0, sizeof(existing_header), &existing_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to update the existing corefile header. Error 0x%x\n", ret);
				break;
			}
		} else {
			// No usable on-disk header: write our in-memory header out fresh
			ret = access_data(access_context, TRUE, 0, sizeof(struct mach_core_fileheader_v2), kdp_core_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the corefile header. Error 0x%x\n", ret);
				break;
			}
		}

		if (kdp_core_header->pub_key_length) {
			// The "next" key lives one reserved slot past the current key's offset
			uint64_t offset = used_existing_header ? existing_header.pub_key_offset : kdp_core_header->pub_key_offset;
			ret = access_data(access_context, TRUE, offset + PUBLIC_KEY_RESERVED_LENGTH, kdp_core_header->pub_key_length, kdp_core_public_key);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the next public key. Error 0x%x\n", ret);
				break;
			}

			if (!used_existing_header) {
				// Everything that happens here is optional. It's not the end of the world if this stuff fails, so we don't return
				// any errors
				// Since we're writing out a completely new header, we make sure to zero-out the region that's reserved for the public key.
				// This allows us consumers of the corefile to know for sure that this corefile is not encrypted (yet). Once we actually
				// write out a corefile, we'll overwrite this region with the key that we ended up using at the time.
				// If we fail to zero-out this region, consumers would read garbage data and properly fail to interpret it as a public key,
				// which is why it is OK for us to fail here (it's hard to interpret garbage data as a valid key, and even then, they wouldn't
				// find a matching private key anyway)
				void *empty_key = NULL;
				kern_return_t temp_ret = KERN_SUCCESS;

				empty_key = kalloc_data(PUBLIC_KEY_RESERVED_LENGTH,
				    Z_WAITOK | Z_ZERO | Z_NOFAIL);

				temp_ret = access_data(access_context, TRUE, offset, PUBLIC_KEY_RESERVED_LENGTH, empty_key);
				kfree_data(empty_key, PUBLIC_KEY_RESERVED_LENGTH);

				if (temp_ret != KERN_SUCCESS) {
					printf("kdp_core_handle_new_encryption_key failed to zero-out the public key region. Error 0x%x\n", temp_ret);
					break;
				}
			}
		}
	} while (0);

	kdp_core_is_initializing_encryption_stage = false;
	lck_mtx_unlock(kdp_core_encryption_stage_lock);

	return ret;
}
1635
/*
 * Invoked when AEA encryption becomes available. Re-runs encryption stage
 * initialization with the already-installed public key, under the
 * encryption-stage lock; the 'initializing' flag lets a concurrent dump
 * attempt detect and refuse a half-built stage.
 */
kern_return_t
kdp_core_handle_encryption_available(void)
{
	kern_return_t ret;

	lck_mtx_lock(kdp_core_encryption_stage_lock);
	kdp_core_is_initializing_encryption_stage = true;

	ret = kdp_core_init_encryption_stage(kdp_core_public_key, kdp_core_header->pub_key_length);

	kdp_core_is_initializing_encryption_stage = false;
	lck_mtx_unlock(kdp_core_encryption_stage_lock);

	return ret;
}
1651
1652 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1653
1654 kern_return_t
kdp_core_handle_lz4_available(void)1655 kdp_core_handle_lz4_available(void)
1656 {
1657 kern_return_t ret;
1658 lck_mtx_lock(kdp_core_lz4_stage_lock);
1659 kdp_core_is_initializing_lz4_stage = true;
1660
1661 ret = lz4_stage_initialize(&lz4_output_stage);
1662
1663 kdp_core_is_initializing_lz4_stage = false;
1664 lck_mtx_unlock(kdp_core_lz4_stage_lock);
1665
1666 return ret;
1667 }
1668
1669 kern_return_t
kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data,void * access_context,__unused void * recipient_context)1670 kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, __unused void *recipient_context)
1671 {
1672 kern_return_t ret = KERN_SUCCESS;
1673
1674 lck_mtx_lock(kdp_core_disk_stage_lock);
1675 kdp_core_is_initializing_disk_stage = true;
1676
1677 ret = disk_stage_initialize(&disk_output_stage);
1678
1679 kdp_core_is_initializing_disk_stage = false;
1680 lck_mtx_unlock(kdp_core_disk_stage_lock);
1681
1682 if (KERN_SUCCESS != ret) {
1683 return ret;
1684 }
1685
1686 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1687 // If someone has already provided a new public key,
1688 // there's no sense in reading the old one from the corefile.
1689 if (kdp_core_public_key != NULL) {
1690 return KERN_SUCCESS;
1691 }
1692
1693 // The kernel corefile is now available. Let's try to retrieve the public key from its
1694 // header (if available and supported).
1695
1696 // First let's read the corefile header itself
1697 struct mach_core_fileheader_v2 temp_header = {};
1698 ret = access_data(access_context, FALSE, 0, sizeof(temp_header), &temp_header);
1699 if (KERN_SUCCESS != ret) {
1700 printf("kdp_core_polled_io_polled_file_available failed to read corefile header. Error 0x%x\n", ret);
1701 return ret;
1702 }
1703
1704 // Check if the corefile header is initialized, and whether it's initialized to values that we support
1705 // (for backwards and forwards) compatibility, and check whether the header indicates that the corefile has
1706 // has a public key stashed inside of it.
1707 if (temp_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
1708 && temp_header.version == 2
1709 && temp_header.pub_key_offset != 0
1710 && temp_header.pub_key_length != 0
1711 /* Future-proofing: make sure it's the key format that we support */
1712 && (temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK) == MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256
1713 /* Add some extra sanity checks. These are not necessary */
1714 && temp_header.pub_key_length <= 4096
1715 && temp_header.pub_key_offset < 65535) {
1716 // The corefile header is properly initialized, is supported, and contains a public key.
1717 // Let's adopt that public key for our encryption needs
1718 void *public_key = NULL;
1719
1720 public_key = kalloc_data(temp_header.pub_key_length,
1721 Z_ZERO | Z_WAITOK | Z_NOFAIL);
1722
1723 // Read the public key from the corefile. Note that the key we're trying to adopt is the "next" key, which is
1724 // PUBLIC_KEY_RESERVED_LENGTH bytes after the public key.
1725 ret = access_data(access_context, FALSE, temp_header.pub_key_offset + PUBLIC_KEY_RESERVED_LENGTH, temp_header.pub_key_length, public_key);
1726 if (KERN_SUCCESS != ret) {
1727 printf("kdp_core_polled_io_polled_file_available failed to read the public key. Error 0x%x\n", ret);
1728 kfree_data(public_key, temp_header.pub_key_length);
1729 return ret;
1730 }
1731
1732 lck_mtx_lock(kdp_core_encryption_stage_lock);
1733 kdp_core_is_initializing_encryption_stage = true;
1734
1735 ret = kdp_core_init_encryption_stage(public_key, temp_header.pub_key_length);
1736 if (KERN_SUCCESS == ret) {
1737 kdp_core_header->flags |= temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1738 kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(temp_header.flags);
1739 kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1740 kdp_core_header->pub_key_length = temp_header.pub_key_length;
1741 kdp_core_public_key = public_key;
1742 }
1743
1744 kdp_core_is_initializing_encryption_stage = false;
1745 lck_mtx_unlock(kdp_core_encryption_stage_lock);
1746 }
1747 #else
1748 #pragma unused(access_data, access_context)
1749 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1750
1751 return ret;
1752 }
1753
/*
 * Invoked when the polled-mode kernel corefile becomes unavailable.
 * Tears down the disk output stage (if it was ever initialized) under the
 * same lock/flag protocol used when initializing it. Always succeeds.
 */
kern_return_t
kdp_core_polled_io_polled_file_unavailable(void)
{
	lck_mtx_lock(kdp_core_disk_stage_lock);
	kdp_core_is_initializing_disk_stage = true;

	/* Free the stage only if it was initialized and provides a free callback. */
	if (disk_output_stage.kos_initialized && disk_output_stage.kos_funcs.kosf_free) {
		disk_output_stage.kos_funcs.kosf_free(&disk_output_stage);
	}

	kdp_core_is_initializing_disk_stage = false;
	lck_mtx_unlock(kdp_core_disk_stage_lock);

	return KERN_SUCCESS;
}
1769
/*
 * One-time initialization of the kernel coredump machinery: sets up the
 * output stages, allocates and stamps the v2 corefile header, creates the
 * locks that guard per-stage (re)initialization, and registers xnu's
 * coredump callbacks with the kern_coredump layer.
 */
void
kdp_core_init(void)
{
	kern_return_t kr;
	kern_coredump_callback_config core_config = { };

	/* Initialize output stages */
	kr = kdp_core_init_output_stages();
	assert(KERN_SUCCESS == kr);

	/*
	 * Permanent, zero-filled allocation for the in-memory corefile header.
	 * KMA_NOFAIL means this cannot return an error, so the result is unchecked.
	 */
	kmem_alloc(kernel_map, (vm_offset_t*)&kdp_core_header,
	    kdp_core_header_size,
	    KMA_NOFAIL | KMA_ZERO | KMA_PERMANENT | KMA_KOBJECT | KMA_DATA,
	    VM_KERN_MEMORY_DIAG);

	/* Stamp the header so consumers can recognize the v2 format. */
	kdp_core_header->signature = MACH_CORE_FILEHEADER_V2_SIGNATURE;
	kdp_core_header->version = 2;

	/* One lock group, with a mutex per output stage that can be (re)initialized. */
	kdp_core_initialization_lock_group = lck_grp_alloc_init("KDPCoreStageInit", LCK_GRP_ATTR_NULL);
	kdp_core_disk_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	kdp_core_encryption_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

	/* Evaluate the encryption-enforcement policy at boot; the result is intentionally ignored here. */
	(void) kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

	kdp_core_lz4_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

	/* Register xnu's own coredump callbacks (summary, segments, threads, sw version, notes). */
	core_config.kcc_coredump_init = kern_dump_init;
	core_config.kcc_coredump_get_summary = kern_dump_save_summary;
	core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
	core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
	core_config.kcc_coredump_save_sw_vers_detail = kern_dump_save_sw_vers_detail;
	core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
	core_config.kcc_coredump_save_note_summary = kern_dump_save_note_summary;
	core_config.kcc_coredump_save_note_descriptions = kern_dump_save_note_descriptions;
	core_config.kcc_coredump_save_note_data = kern_dump_save_note_data;

	kr = kern_register_xnu_coredump_helper(&core_config);
	assert(KERN_SUCCESS == kr);
}
1812
1813 /*
1814 * Additional LC_NOTES added to the core.
1815 */
1816
1817 static kern_return_t
kern_dump_save_note_summary(void * refcon __unused,core_save_note_summary_cb callback,void * context)1818 kern_dump_save_note_summary(void *refcon __unused, core_save_note_summary_cb callback, void *context)
1819 {
1820 int count = 1;
1821 size_t size = sizeof(addrable_bits_note_t);
1822
1823
1824 return callback(count, size, context);
1825 }
1826
1827 static kern_return_t
kern_dump_save_note_descriptions(void * refcon __unused,core_save_note_descriptions_cb callback,void * context)1828 kern_dump_save_note_descriptions(void *refcon __unused, core_save_note_descriptions_cb callback, void *context)
1829 {
1830 int max_ret = KERN_SUCCESS;
1831 int ret;
1832
1833 max_ret = ret = callback(ADDRABLE_BITS_DATA_OWNER, sizeof(addrable_bits_note_t), context);
1834
1835
1836 return max_ret;
1837 }
1838
1839 static kern_return_t
kern_dump_save_note_data(void * refcon __unused,core_save_note_data_cb callback,void * context)1840 kern_dump_save_note_data(void *refcon __unused, core_save_note_data_cb callback, void *context)
1841 {
1842 int max_ret = KERN_SUCCESS;
1843 int ret;
1844
1845 addrable_bits_note_t note = {
1846 .version = ADDRABLE_BITS_VER,
1847 .addressing_bits = pmap_kernel_va_bits(),
1848 .unused = 0
1849 };
1850
1851 max_ret = ret = callback(¬e, sizeof(addrable_bits_note_t), context);
1852
1853
1854 return max_ret;
1855 }
1856
1857 #else
1858
/*
 * Stub for builds without CONFIG_KDP_INTERACTIVE_DEBUGGING: region exclusion
 * is unsupported when kernel coredumps are compiled out.
 */
kern_return_t
kdp_core_exclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
	return KERN_NOT_SUPPORTED;
}
1864
/*
 * Stub for builds without CONFIG_KDP_INTERACTIVE_DEBUGGING: mirrors
 * kdp_core_exclude_region() and always reports the feature as unsupported.
 */
kern_return_t
kdp_core_unexclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
	return KERN_NOT_SUPPORTED;
}
1870
1871 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1872