xref: /xnu-10002.81.5/osfmk/kdp/kdp_core.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * The main orchestrator for kernel (and co-processor) coredumps. Here's a very simplistic view of
31  * the flow:
32  *
33  * At kernel initialization time (kdp_core_init):
34  * ----------------------------------------------
35  *
36  * - kdp_core_init() takes care of allocating all necessary data structures and initializes the
37  *   coredump output stages
38  *
39  * At coredump time (do_kern_dump):
40  * --------------------------------
41  *
42  * - Depending on the coredump variant, we chain the necessary output stages together in chain_output_stages()
43  * - [Disk only] We initialize the corefile header
44  * - [Disk only] We stream the stackshot out through the output stages and update the corefile header
45  * - We perform the kernel coredump, streaming it out through the output stages
46  * - [Disk only] We update the corefile header
47  * - [Disk only] We perform the co-processor coredumps (driven by kern_do_coredump), streaming each out
48  *               through the output stages and updating the corefile header.
49  * - [Disk only] We save the coredump log to the corefile
50  */
51 
52 #include <mach/kern_return.h>
53 #include <mach/vm_types.h>
54 #include <kdp/core_exclude.h>
55 #include <kdp/kdp_core.h>
56 #include <kdp/core_notes.h>
57 
58 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
59 
60 #include <mach/mach_types.h>
61 #include <mach/vm_attributes.h>
62 #include <mach/vm_param.h>
63 #include <mach/vm_map.h>
64 #include <vm/vm_protos.h>
65 #include <vm/vm_kern.h>
66 #include <vm/vm_map.h>
67 #include <machine/cpu_capabilities.h>
68 #include <libsa/types.h>
69 #include <libkern/kernel_mach_header.h>
70 #include <kern/locks.h>
71 #include <kdp/kdp_internal.h>
72 #include <kdp/output_stages/output_stages.h>
73 #include <kdp/processor_core.h>
74 #include <IOKit/IOTypes.h>
75 #include <IOKit/IOBSD.h>
76 #include <sys/errno.h>
77 #include <sys/msgbuf.h>
78 #include <san/kasan.h>
79 #include <kern/debug.h>
80 #include <pexpert/pexpert.h>
81 #include <os/atomic_private.h>
82 
83 
84 #if defined(__x86_64__)
85 #include <i386/pmap_internal.h>
86 #include <kdp/ml/i386/kdp_x86_common.h>
87 #include <kern/debug.h>
88 #endif /* defined(__x86_64__) */
89 
90 
91 kern_return_t kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context);
92 kern_return_t kdp_core_polled_io_polled_file_unavailable(void);
93 
94 typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
95     vm_map_offset_t end,
96     void *context);
97 
98 static kern_return_t kern_dump_init(void *refcon, void *context);
99 static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
100 static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
101 static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
102 static int kern_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context);
103 static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);
104 static kern_return_t kern_dump_save_note_summary(void *refcon, core_save_note_summary_cb callback, void *context);
105 static kern_return_t kern_dump_save_note_descriptions(void *refcon, core_save_note_descriptions_cb callback, void *context);
106 static kern_return_t kern_dump_save_note_data(void *refcon, core_save_note_data_cb callback, void *context);
107 
108 static int
109 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
110     vm_map_offset_t end,
111     void *context);
112 static int
113 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
114     vm_map_offset_t end,
115     void *context);
116 
117 static int
118 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
119     vm_map_offset_t end,
120     void *context);
121 
/*
 * One statically-allocated instance of every possible output stage; the
 * subset needed for a given dump variant is chained together at dump time
 * (see the file-header comment above).
 */
static struct kdp_output_stage disk_output_stage = {};
static struct kdp_output_stage lz4_output_stage = {};
static struct kdp_output_stage zlib_output_stage = {};
static struct kdp_output_stage buffer_output_stage = {};
static struct kdp_output_stage net_output_stage = {};
static struct kdp_output_stage progress_notify_output_stage = {};
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static struct kdp_output_stage aea_output_stage = {};
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
#if defined(__arm64__)
static struct kdp_output_stage shmem_output_stage = {};
static struct kdp_output_stage memory_backing_aware_buffer_output_stage = {};
#endif /* defined(__arm64__) */

extern uint32_t kdp_crashdump_pkt_size;

static boolean_t kern_dump_successful = FALSE;

/* v2 corefile header plus one file-details record per possible core. */
static const size_t kdp_core_header_size = sizeof(struct mach_core_fileheader_v2) + (KERN_COREDUMP_MAX_CORES * sizeof(struct mach_core_details_v2));
static struct mach_core_fileheader_v2 *kdp_core_header = NULL;

/* Serialization for lazy initialization of the disk output stage. */
static lck_grp_t *kdp_core_initialization_lock_group = NULL;
static lck_mtx_t *kdp_core_disk_stage_lock = NULL;
static bool kdp_core_is_initializing_disk_stage = false;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
/* Space reserved in the corefile for the (zero-padded) public key blob. */
static const size_t PUBLIC_KEY_RESERVED_LENGTH = roundup(4096, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
static void *kdp_core_public_key = NULL;
static lck_mtx_t *kdp_core_encryption_stage_lock = NULL;
static bool kdp_core_is_initializing_encryption_stage = false;

static bool kern_dump_should_enforce_encryption(void);
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

static lck_mtx_t *kdp_core_lz4_stage_lock = NULL;
static bool kdp_core_is_initializing_lz4_stage = false;

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

/* Boot-arg override bits for coredump encryption behavior. */
#define COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY (1 << 0)
#define COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT  (1 << 1)
168 
169 boolean_t
kdp_has_polled_corefile(void)170 kdp_has_polled_corefile(void)
171 {
172 	return NULL != gIOPolledCoreFileVars;
173 }
174 
175 kern_return_t
kdp_polled_corefile_error(void)176 kdp_polled_corefile_error(void)
177 {
178 	return gIOPolledCoreFileOpenRet;
179 }
180 
181 IOPolledCoreFileMode_t
kdp_polled_corefile_mode(void)182 kdp_polled_corefile_mode(void)
183 {
184 	return gIOPolledCoreFileMode;
185 }
186 
/*
 * Node in the singly-linked list of kernel VA ranges excluded from kernel
 * coredumps.  The list head (excluded_regions) and all links are guarded by
 * excluded_regions_mtx.
 */
struct kdp_core_excluded_region {
	struct kdp_core_excluded_region *next;	/* next node, or NULL */
	vm_offset_t addr;	/* page-aligned start address */
	vm_size_t size;		/* page-aligned length in bytes */
};
192 
193 static LCK_GRP_DECLARE(excluded_regions_grp, "kdp-exclude-regions");
194 static LCK_MTX_DECLARE(excluded_regions_mtx, &excluded_regions_grp);
195 static struct kdp_core_excluded_region *excluded_regions;
196 
197 void
kdp_core_exclude_region(vm_offset_t addr,vm_size_t size)198 kdp_core_exclude_region(vm_offset_t addr, vm_size_t size)
199 {
200 	struct kdp_core_excluded_region *region;
201 
202 	if (addr >= addr + size) {
203 		panic("%s: cannot exclude region starting at %p with size %zu (zero or overflowing size)",
204 		    __func__, (void*)addr, (size_t)size);
205 	}
206 	if (addr != round_page(addr) || size != round_page(size)) {
207 		panic("%s: cannot exclude region starting at %p with size %zu (not page aligned)",
208 		    __func__, (void*)addr, (size_t)size);
209 	}
210 
211 	region = kalloc_type(typeof(*region), Z_WAITOK | Z_NOFAIL);
212 	region->addr = addr;
213 	region->size = size;
214 
215 	lck_mtx_lock(&excluded_regions_mtx);
216 	region->next = excluded_regions;
217 	excluded_regions = region;
218 	lck_mtx_unlock(&excluded_regions_mtx);
219 }
220 
221 void
kdp_core_unexclude_region(vm_offset_t addr,vm_size_t size)222 kdp_core_unexclude_region(vm_offset_t addr, vm_size_t size)
223 {
224 	struct kdp_core_excluded_region *region;
225 	struct kdp_core_excluded_region **fixup = &excluded_regions;
226 
227 	lck_mtx_lock(&excluded_regions_mtx);
228 	for (region = excluded_regions; region; region = region->next) {
229 		if (region->addr == addr && region->size == size) {
230 			*fixup = region->next;
231 			break;
232 		}
233 		fixup = &region->next;
234 	}
235 	if (!region) {
236 		panic("%s: cannot unexclude region starting at %p with size %zu (not currently excluded)",
237 		    __func__, (void*)addr, (size_t)size);
238 	}
239 	lck_mtx_unlock(&excluded_regions_mtx);
240 
241 	// We had exclusive access to the list when we removed the region, and it is no longer
242 	// reachable from the list, so it is safe to free.
243 	kfree_type(typeof(*region), region);
244 }
245 
246 static bool
kernel_vaddr_in_excluded_region(vm_offset_t addr,uint64_t * vincr)247 kernel_vaddr_in_excluded_region(vm_offset_t addr, uint64_t *vincr)
248 {
249 	struct kdp_core_excluded_region *region;
250 
251 	// We check this earlier before attempting to dump the kernel, but verify here.
252 	assert(!kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx));
253 
254 	for (region = excluded_regions; region; region = region->next) {
255 		if (region->addr <= addr && addr < (region->addr + region->size)) {
256 			*vincr = region->size;
257 			return true;
258 		}
259 	}
260 
261 	return false;
262 }
263 
/*
 * Push a chunk of coredump bytes into the first chained output stage.
 * A call with length == 0 and data == NULL acts as an end-of-stream /
 * flush marker.
 *
 * Parameters:
 *   kdp_core_out_state - the struct kdp_core_out_state tracking progress
 *                        and error status for the current dump
 *   length             - number of bytes in 'data' (0 for flush)
 *   data               - bytes to emit (NULL for flush)
 *
 * Returns KERN_SUCCESS or the error reported by the first stage.  The
 * first failure is latched in kcos_error; once latched, later calls do
 * nothing and return KERN_SUCCESS (the dump is already considered failed).
 */
kern_return_t
kdp_core_output(void *kdp_core_out_state, uint64_t length, void * data)
{
	kern_return_t              err = KERN_SUCCESS;
	uint64_t                   percent;
	struct kdp_core_out_state *vars = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage   *first_stage = STAILQ_FIRST(&vars->kcos_out_stage);

	if (vars->kcos_error == KERN_SUCCESS) {
#if DEVELOPMENT || DEBUG
		// panic testing: force the write to fail after X number of writes
		if ((panic_test_case & PANIC_TEST_CASE_COREFILE_IO_ERR) && (--panic_test_action_count == 0)) {
			panic_test_case &= ~PANIC_TEST_CASE_COREFILE_IO_ERR;
			length = -1;	/* bogus length makes the stage fail */
		}
#endif

		if ((err = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, length, data)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(kdp_core_output) outproc(KDP_DATA, NULL, 0x%llx, %p) returned 0x%x\n",
			    length, data, err);
			vars->kcos_error = err;	/* latch the first error */
		}
		if (!data && !length) {
			/* flush marker: log completion of this core */
			kern_coredump_log(NULL, "100..");
		} else {
			/* log progress in roughly 10% increments */
			vars->kcos_bytes_written += length;
			percent = (vars->kcos_bytes_written * 100) / vars->kcos_totalbytes;
			if ((percent - vars->kcos_lastpercent) >= 10) {
				vars->kcos_lastpercent = percent;
				kern_coredump_log(NULL, "%lld..\n", percent);
			}
		}
	}
	return err;
}
299 
300 #if defined(__arm64__)
301 extern pmap_paddr_t avail_start, avail_end;
302 extern struct vm_object pmap_object_store;
303 #endif
304 extern vm_offset_t c_buffers;
305 extern vm_size_t   c_buffers_size;
306 
307 static bool
kernel_vaddr_in_coredump_stage(const struct kdp_output_stage * stage,uint64_t vaddr,uint64_t * vincr)308 kernel_vaddr_in_coredump_stage(const struct kdp_output_stage *stage, uint64_t vaddr, uint64_t *vincr)
309 {
310 	uint64_t start_addr = (uint64_t)stage->kos_data;
311 	uint64_t end_addr = start_addr + stage->kos_data_size;
312 
313 	if (!stage->kos_data) {
314 		return false;
315 	}
316 
317 	if (vaddr >= start_addr && vaddr < end_addr) {
318 		*vincr = stage->kos_data_size - (vaddr - start_addr);
319 		return true;
320 	}
321 
322 	return false;
323 }
324 
325 static bool
kernel_vaddr_in_coredump_stages(uint64_t vaddr,uint64_t * vincr)326 kernel_vaddr_in_coredump_stages(uint64_t vaddr, uint64_t *vincr)
327 {
328 	if (kernel_vaddr_in_coredump_stage(&disk_output_stage, vaddr, vincr)) {
329 		return true;
330 	}
331 
332 	if (kernel_vaddr_in_coredump_stage(&lz4_output_stage, vaddr, vincr)) {
333 		return true;
334 	}
335 
336 	if (kernel_vaddr_in_coredump_stage(&zlib_output_stage, vaddr, vincr)) {
337 		return true;
338 	}
339 
340 	if (kernel_vaddr_in_coredump_stage(&buffer_output_stage, vaddr, vincr)) {
341 		return true;
342 	}
343 
344 	if (kernel_vaddr_in_coredump_stage(&net_output_stage, vaddr, vincr)) {
345 		return true;
346 	}
347 
348 	if (kernel_vaddr_in_coredump_stage(&progress_notify_output_stage, vaddr, vincr)) {
349 		return true;
350 	}
351 
352 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
353 	if (kernel_vaddr_in_coredump_stage(&aea_output_stage, vaddr, vincr)) {
354 		return true;
355 	}
356 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
357 
358 #if defined(__arm64__)
359 	if (kernel_vaddr_in_coredump_stage(&shmem_output_stage, vaddr, vincr)) {
360 		return true;
361 	}
362 #endif /* defined(__arm64__) */
363 
364 #if defined(__arm64__)
365 	if (kernel_vaddr_in_coredump_stage(&memory_backing_aware_buffer_output_stage, vaddr, vincr)) {
366 		return true;
367 	}
368 #endif /* defined(__arm64__) */
369 
370 	return false;
371 }
372 
/*
 * Classify the kernel virtual page at 'vaddr' for coredump purposes.
 *
 * Returns the physical page number backing 'vaddr', or 0 when the page
 * should not be dumped: unmapped, or inside a range we deliberately skip
 * (compressor buffers, the output stages' own working memory, the RAMDisk,
 * and — on arm64 — the physical memory map).
 *
 * On return *pvincr holds the page-rounded number of bytes the caller
 * should advance by; for a skipped range this covers the whole range in
 * one step rather than a single page.
 *
 * When 'pvphysaddr' is non-NULL, pmap_find_phys() is used for the lookup
 * (pmap_find_phys_nofault() otherwise) and *pvphysaddr receives the
 * kernel VA of the page if it is enclosed by the physmap; pages outside
 * the physmap are suppressed (ppn forced to 0).
 */
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
	ppnum_t ppn = 0;
	uint64_t vincr = PAGE_SIZE_64;

	/* Caller must hand us a page-aligned address. */
	assert(!(vaddr & PAGE_MASK_64));

	/* VA ranges to exclude */
	if (vaddr == c_buffers) {
		/* compressor data */
		ppn = 0;
		vincr = c_buffers_size;
	} else if (kernel_vaddr_in_coredump_stages(vaddr, &vincr)) {
		/* coredump output stage working memory */
		ppn = 0;
	} else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
		/* root RAMDisk, if present */
		ppn = 0;
		vincr = kdp_core_ramdisk_size;
	} else
#if defined(__arm64__)
	if (vaddr == phystokv(avail_start)) {
		/* physical memory map */
		ppn = 0;
		vincr = (avail_end - avail_start);
	} else
#endif /* defined(__arm64__) */
	{
		ppn = (pvphysaddr != NULL ?
		    pmap_find_phys(kernel_pmap, vaddr) :
		    pmap_find_phys_nofault(kernel_pmap, vaddr));
	}

	*pvincr = round_page_64(vincr);

	if (ppn && pvphysaddr) {
		uint64_t phys = ptoa_64(ppn);
		/* Only report a VA when the page lies within the physmap. */
		if (physmap_enclosed(phys)) {
			*pvphysaddr = phystokv(phys);
		} else {
			ppn = 0;
		}
	}

	return ppn;
}
419 
/*
 * Walk the kernel address space from 'start' to 'end' and invoke 'callback'
 * once per maximal run of contiguous, present, dumpable mappings.
 *
 * A run is broken whenever a page is absent or filtered out: coredump
 * working memory / RAMDisk / compressor (via kernel_pmap_present_mapping),
 * explicitly excluded regions, pages that are neither pmap- nor
 * bootloader-valid outside the debug buffer (and, on bridgeOS, outside the
 * macOS panic region), and the phys carveout unless explicitly dumpable.
 *
 * On arm64, when the cursor reaches the physmap the pagetable pages held in
 * pmap_object_store are walked page-by-page instead of scanning the whole
 * physical aperture.  On x86_64, absent 2MB-aligned ranges with no valid
 * PDE are skipped a superpage at a time.
 *
 * Returns KERN_SUCCESS, KERN_INVALID_ARGUMENT for a reversed range, or the
 * first error returned by 'callback'.  Assumes the pmap is locked or that
 * we are in the kernel debugger.
 */
static int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
	IOReturn        ret;
	vm_map_offset_t vcurstart, vcur;
	uint64_t        vincr = 0;
	vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
	vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
	vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
	vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

	boolean_t       lastvavalid;
#if defined(__arm64__)
	vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
	assert(!is_ept_pmap(pmap));
#endif

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return KERN_INVALID_ARGUMENT;
	}

	ret = KERN_SUCCESS;
	lastvavalid = FALSE;
	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
		ppnum_t ppn = 0;

#if defined(__arm64__)
		/* We're at the start of the physmap, so pull out the pagetable pages that
		 * are accessed through that region.*/
		if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
			m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
		}

		if (m != VM_PAGE_NULL) {
			vm_map_offset_t vprev = vcur;
			/* atop(avail_end) doubles as the "no more pages" sentinel. */
			ppn = (ppnum_t)atop(avail_end);
			while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
				/* Ignore pages that come from the static region and have already been dumped.*/
				if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
					ppn = VM_PAGE_GET_PHYS_PAGE(m);
					break;
				}
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
			}
			vincr = PAGE_SIZE_64;
			if (ppn == atop(avail_end)) {
				vm_object_unlock(&pmap_object_store);
				m = VM_PAGE_NULL;
				// avail_end is not a valid physical address,
				// so phystokv(avail_end) may not produce the expected result.
				vcur = phystokv(avail_start) + (avail_end - avail_start);
			} else {
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
				vcur = phystokv(ptoa(ppn));
			}
			if (vcur != vprev) {
				/* The cursor jumped; emit the run accumulated so far. */
				ret = callback(vcurstart, vprev, context);
				lastvavalid = FALSE;
			}
		}
		if (m == VM_PAGE_NULL) {
			ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
		}
#else /* defined(__arm64__) */
		ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
		if (ppn != 0 && kernel_vaddr_in_excluded_region(vcur, &vincr)) {
			/* excluded region */
			ppn = 0;
		}
		if (ppn != 0) {
			if (((vcur < debug_start) || (vcur >= debug_end))
			    && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
			    // include the macOS panic region if it's mapped
			    && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
			    ) {
				/* not something we want */
				ppn = 0;
			}
			/* include the phys carveout only if explictly marked */
			if (debug_is_in_phys_carveout(vcur) &&
			    !debug_can_coredump_phys_carveout()) {
				ppn = 0;
			}
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

#if defined(__x86_64__)
			/* Try to skip by 2MB if possible */
			if ((vcur & PDMASK) == 0) {
				pd_entry_t *pde;
				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vincr = NBPD;
					}
				}
			}
#endif /* defined(__x86_64__) */
		}
		vcur += vincr;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}

#if KASAN
	if (ret == KERN_SUCCESS) {
		ret = kasan_traverse_mappings(callback, context);
	}
#endif

	return ret;
}
562 
/*
 * Accumulator for the preflight pass over kernel mappings: tallies the
 * number of contiguous dumpable VA runs and their total size in bytes.
 */
struct kern_dump_preflight_context {
	uint32_t region_count;		/* number of contiguous runs seen */
	uint64_t dumpable_bytes;	/* total bytes across those runs */
};
567 
568 int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)569 kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
570     vm_map_offset_t end,
571     void *context)
572 {
573 	struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
574 	IOReturn ret = KERN_SUCCESS;
575 
576 	kdc->region_count++;
577 	kdc->dumpable_bytes += (end - start);
578 
579 	return ret;
580 }
581 
582 
/*
 * Adapter context: carries the coredump layer's segment-description
 * callback (and its opaque context) through the pmap traversal.
 */
struct kern_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};
587 
588 int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)589 kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
590     vm_map_offset_t end,
591     void *context)
592 {
593 	struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
594 	uint64_t seg_start = (uint64_t) start;
595 	uint64_t seg_end = (uint64_t) end;
596 
597 	return kds_context->callback(seg_start, seg_end, kds_context->context);
598 }
599 
/*
 * Adapter context: carries the coredump layer's segment-data callback
 * (and its opaque context) through the pmap traversal.
 */
struct kern_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};
604 
605 int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,vm_map_offset_t end,void * context)606 kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
607     vm_map_offset_t end,
608     void *context)
609 {
610 	struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;
611 
612 	return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
613 }
614 
/*
 * Dump-time init hook for the kernel coredump provider.
 *
 * Refuses to dump the kernel (KERN_NODE_DOWN) when the excluded-regions
 * mutex is held at panic time, since the exclusion list cannot be walked
 * safely; the panic header is flagged and flushed so triage can tell why
 * the kernel core is missing.
 */
static kern_return_t
kern_dump_init(__unused void *refcon, void *context)
{
	/* TODO: consider doing mmu flush from an init function */

	// If excluded regions list is locked, it is unsafe to dump the kernel.
	if (kdp_lck_mtx_lock_spin_is_acquired(&excluded_regions_mtx)) {
		kern_coredump_log(context, "%s: skipping kernel because excluded regions list is locked\n",
		    __func__);
#if defined(__arm64__)
		panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#else
		panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_KERNEL_COREDUMP_SKIPPED_EXCLUDE_REGIONS_UNAVAILABLE;
#endif
		paniclog_flush();
		return KERN_NODE_DOWN;
	}

	return KERN_SUCCESS;
}
635 
636 static int
kern_dump_save_summary(__unused void * refcon,core_save_summary_cb callback,void * context)637 kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
638 {
639 	struct kern_dump_preflight_context kdc_preflight = { };
640 	uint64_t thread_state_size = 0, thread_count = 0;
641 	vm_map_offset_t vstart = kdp_core_start_addr();
642 	kern_return_t ret;
643 
644 	ret = pmap_traverse_present_mappings(kernel_pmap,
645 	    vstart,
646 	    VM_MAX_KERNEL_ADDRESS,
647 	    kern_dump_pmap_traverse_preflight_callback,
648 	    &kdc_preflight);
649 	if (ret != KERN_SUCCESS) {
650 		kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
651 		return ret;
652 	}
653 
654 	kern_collectth_state_size(&thread_count, &thread_state_size);
655 
656 	ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
657 	    thread_count, thread_state_size, 0, context);
658 	return ret;
659 }
660 
661 static int
kern_dump_save_seg_descriptions(__unused void * refcon,core_save_segment_descriptions_cb callback,void * context)662 kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
663 {
664 	vm_map_offset_t vstart = kdp_core_start_addr();
665 	kern_return_t ret;
666 	struct kern_dump_send_seg_desc_context kds_context;
667 
668 	kds_context.callback = callback;
669 	kds_context.context = context;
670 
671 	ret = pmap_traverse_present_mappings(kernel_pmap,
672 	    vstart,
673 	    VM_MAX_KERNEL_ADDRESS,
674 	    kern_dump_pmap_traverse_send_segdesc_callback,
675 	    &kds_context);
676 	if (ret != KERN_SUCCESS) {
677 		kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
678 		return ret;
679 	}
680 
681 	return KERN_SUCCESS;
682 }
683 
684 static int
kern_dump_save_thread_state(__unused void * refcon,void * buf,core_save_thread_state_cb callback,void * context)685 kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
686 {
687 	kern_return_t ret;
688 	uint64_t thread_state_size = 0, thread_count = 0;
689 
690 	kern_collectth_state_size(&thread_count, &thread_state_size);
691 
692 	if (thread_state_size > 0) {
693 		void * iter = NULL;
694 		do {
695 			kern_collectth_state(current_thread(), buf, thread_state_size, &iter);
696 
697 			ret = callback(buf, context);
698 			if (ret != KERN_SUCCESS) {
699 				return ret;
700 			}
701 		} while (iter);
702 	}
703 
704 	return KERN_SUCCESS;
705 }
706 
707 
708 static int
kern_dump_save_sw_vers_detail(__unused void * refcon,core_save_sw_vers_detail_cb callback,void * context)709 kern_dump_save_sw_vers_detail(__unused void *refcon, core_save_sw_vers_detail_cb callback, void *context)
710 {
711 	return callback(vm_kernel_stext, kernel_uuid, 0, context);
712 }
713 
714 static int
kern_dump_save_segment_data(__unused void * refcon,core_save_segment_data_cb callback,void * context)715 kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
716 {
717 	vm_map_offset_t vstart = kdp_core_start_addr();
718 	kern_return_t ret;
719 	struct kern_dump_send_segdata_context kds_context;
720 
721 	kds_context.callback = callback;
722 	kds_context.context = context;
723 
724 	ret = pmap_traverse_present_mappings(kernel_pmap,
725 	    vstart,
726 	    VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
727 	if (ret != KERN_SUCCESS) {
728 		kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
729 		return ret;
730 	}
731 
732 	return KERN_SUCCESS;
733 }
734 
/*
 * Prepare the shared output state and every chained output stage for a new
 * core within the same corefile.
 *
 * Parameters:
 *   kdp_core_out_state       - the struct kdp_core_out_state to reset
 *   totalbytes               - expected size of the upcoming core (drives
 *                              the percentage progress log)
 *   encrypt_core             - whether this core should be encrypted
 *   out_should_skip_coredump - set to true when encryption is requested
 *                              and enforced but no encryption stage exists;
 *                              the caller must then skip this core
 *
 * When encryption is not requested but an encryption stage is present, the
 * stage is bypassed rather than removed.  Always returns KERN_SUCCESS.
 */
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_state, uint64_t totalbytes, bool encrypt_core, bool *out_should_skip_coredump)
{
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *current_stage = NULL;

	/* Re-initialize kdp_outstate */
	outstate->kcos_totalbytes = totalbytes;
	outstate->kcos_bytes_written = 0;
	outstate->kcos_lastpercent = 0;
	outstate->kcos_error = KERN_SUCCESS;

	/* Reset the output stages */
	STAILQ_FOREACH(current_stage, &outstate->kcos_out_stage, kos_next) {
		current_stage->kos_funcs.kosf_reset(current_stage);
	}

	*out_should_skip_coredump = false;
	if (encrypt_core) {
		if (outstate->kcos_enforce_encryption && !outstate->kcos_encryption_stage) {
			/* Enforcement without capability: flag it and skip the core. */
			*out_should_skip_coredump = true;
#if defined(__arm64__)
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#else
			panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#endif
			kern_coredump_log(NULL, "(kdp_reset_output_vars) Encryption requested, is unavailable, and enforcement is active. Skipping current core.\n");
		}
	} else if (outstate->kcos_encryption_stage) {
		/* Unencrypted core: pass data straight through the stage. */
		outstate->kcos_encryption_stage->kos_bypass = true;
	}

	return KERN_SUCCESS;
}
769 
/*
 * Rewrite the corefile header at offset 0 through the first output stage
 * and flush it, so the on-disk header always reflects what has been
 * written so far.  On arm64 an additional explicit KDP_FLUSH is issued.
 * Returns KERN_SUCCESS or the first stage error.
 */
static kern_return_t
kern_dump_update_header(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	kern_return_t ret;

	/* Write the file header -- first seek to the beginning of the file */
	foffset = 0;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	/* Write out the header plus all per-core details records. */
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header_size, kdp_core_header)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
		    kdp_core_header_size, kdp_core_header, ret);
		return ret;
	}

	/* Zero-length write signals a data flush. */
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
805 
806 kern_return_t
kern_dump_record_file(void * kdp_core_out_state,const char * filename,uint64_t file_offset,uint64_t * out_file_length,uint64_t details_flags)807 kern_dump_record_file(void *kdp_core_out_state, const char *filename, uint64_t file_offset, uint64_t *out_file_length, uint64_t details_flags)
808 {
809 	kern_return_t ret = KERN_SUCCESS;
810 	uint64_t bytes_written = 0;
811 	struct mach_core_details_v2 *core_details = NULL;
812 	struct kdp_output_stage *last_stage;
813 	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
814 
815 	assert(kdp_core_header->num_files < KERN_COREDUMP_MAX_CORES);
816 	assert(out_file_length != NULL);
817 	*out_file_length = 0;
818 
819 	last_stage = STAILQ_LAST(&outstate->kcos_out_stage, kdp_output_stage, kos_next);
820 	bytes_written = last_stage->kos_bytes_written;
821 
822 	core_details = &(kdp_core_header->files[kdp_core_header->num_files]);
823 	core_details->flags = details_flags;
824 	core_details->offset = file_offset;
825 	core_details->length = bytes_written;
826 	strncpy((char *)&core_details->core_name, filename,
827 	    MACH_CORE_FILEHEADER_NAMELEN);
828 	core_details->core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';
829 
830 	kdp_core_header->num_files++;
831 
832 	ret = kern_dump_update_header(outstate);
833 	if (ret == KERN_SUCCESS) {
834 		*out_file_length = bytes_written;
835 	}
836 
837 	return ret;
838 }
839 
840 kern_return_t
kern_dump_seek_to_next_file(void * kdp_core_out_state,uint64_t next_file_offset)841 kern_dump_seek_to_next_file(void *kdp_core_out_state, uint64_t next_file_offset)
842 {
843 	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
844 	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
845 	kern_return_t ret;
846 
847 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != KERN_SUCCESS) {
848 		kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
849 		    sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
850 	}
851 
852 	return ret;
853 }
854 
855 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
856 
857 static kern_return_t
kern_dump_write_public_key(struct kdp_core_out_state * outstate)858 kern_dump_write_public_key(struct kdp_core_out_state *outstate)
859 {
860 	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
861 	uint64_t foffset;
862 	uint64_t remainder = PUBLIC_KEY_RESERVED_LENGTH - kdp_core_header->pub_key_length;
863 	kern_return_t ret;
864 
865 	if (kdp_core_header->pub_key_offset == 0 || kdp_core_header->pub_key_length == 0) {
866 		// Nothing to do
867 		return KERN_SUCCESS;
868 	}
869 
870 	/* Write the public key -- first seek to the appropriate offset */
871 	foffset = kdp_core_header->pub_key_offset;
872 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
873 		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
874 		    sizeof(foffset), &foffset, foffset, ret);
875 		return ret;
876 	}
877 
878 	// Write the public key
879 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
880 		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
881 		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
882 		return ret;
883 	}
884 
885 	// Fill out the remainder of the block with zeroes
886 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
887 		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
888 		    remainder, ret);
889 		return ret;
890 	}
891 
892 	// Do it once more to write the "next" public key
893 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
894 		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
895 		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
896 		return ret;
897 	}
898 
899 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
900 		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
901 		    remainder, ret);
902 		return ret;
903 	}
904 
905 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
906 		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc data flush returned 0x%x\n", ret);
907 		return ret;
908 	}
909 
910 #if defined(__arm64__)
911 	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
912 		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc explicit flush returned 0x%x\n", ret);
913 		return ret;
914 	}
915 #endif /* defined(__arm64__) */
916 
917 	return ret;
918 }
919 
920 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
921 
/*
 * Build the ordered chain of output stages for the requested dump variant.
 *
 * Stages are appended head-to-tail onto outstate->kcos_out_stage; data written
 * during the dump flows through them in insertion order (e.g. compression
 * before encryption before the disk/net/shmem sink). Fails with KERN_FAILURE
 * if a required stage is mid-initialization or not yet initialized.
 *
 * On success, *details_flags carries the MACH_CORE_DETAILS_V2 flags
 * (compression/encryption) describing how the produced files are encoded.
 */
static kern_return_t
chain_output_stages(enum kern_dump_type kd_variant, struct kdp_core_out_state *outstate, uint64_t *details_flags)
{
	struct kdp_output_stage *current = NULL;

	assert(details_flags);
	*details_flags = 0;

	switch (kd_variant) {
	case KERN_DUMP_STACKSHOT_DISK:
		OS_FALLTHROUGH;
	case KERN_DUMP_DISK:
#if defined(__arm64__)
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &memory_backing_aware_buffer_output_stage, kos_next);
#endif
		/* Optional LZ4 compression stage (disk dumps), unless disabled by policy. */
		if (!kdp_corezip_disabled) {
			if (kdp_core_is_initializing_lz4_stage) {
				kern_coredump_log(NULL, "We were in the middle of initializing LZ4 stage. Cannot write a coredump to disk\n");
				return KERN_FAILURE;
			} else if (!lz4_output_stage.kos_initialized) {
				kern_coredump_log(NULL, "LZ4 stage is not yet initialized. Cannot write a coredump to disk\n");
				return KERN_FAILURE;
			}
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &lz4_output_stage, kos_next);
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_LZ4;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Encryption is best-effort here: if the AEA stage is mid-init it is
		 * simply skipped; enforcement is checked separately below. */
		if (kdp_core_is_initializing_encryption_stage) {
			kern_coredump_log(NULL, "We were in the middle of initializing encryption. Marking it as unavailable\n");
		} else if (aea_output_stage.kos_initialized) {
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &aea_output_stage, kos_next);
			outstate->kcos_encryption_stage = &aea_output_stage;
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_ENCRYPTED_AEA;
		}
		outstate->kcos_enforce_encryption = kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
		/* The disk sink is mandatory for disk variants. */
		if (kdp_core_is_initializing_disk_stage) {
			kern_coredump_log(NULL, "We were in the middle of initializing the disk stage. Cannot write a coredump to disk\n");
			return KERN_FAILURE;
		} else if (disk_output_stage.kos_initialized == false) {
			kern_coredump_log(NULL, "Corefile is not yet initialized. Cannot write a coredump to disk\n");
			return KERN_FAILURE;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &disk_output_stage, kos_next);
		break;
	case KERN_DUMP_NET:
		/* Network dumps use zlib (not LZ4), then packetization via the buffer stage. */
		if (!kdp_corezip_disabled) {
			if (!zlib_output_stage.kos_initialized) {
				kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to the network\n");
				return KERN_FAILURE;
			}
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &buffer_output_stage, kos_next);
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &net_output_stage, kos_next);
		break;
#if defined(__arm64__)
	case KERN_DUMP_HW_SHMEM_DBG:
		/* Hardware shared-memory debug transport: zlib straight into shmem. */
		if (!kdp_corezip_disabled) {
			if (!zlib_output_stage.kos_initialized) {
				kern_coredump_log(NULL, "Zlib stage is not initialized. Cannot write a coredump to shared memory\n");
				return KERN_FAILURE;
			}
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
			*details_flags |= MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &shmem_output_stage, kos_next);
		break;
#endif /* defined(__arm64__) */
	}

	/* Give every chained stage a back-pointer to the shared output state. */
	STAILQ_FOREACH(current, &outstate->kcos_out_stage, kos_next) {
		current->kos_outstate = outstate;
	}

	return KERN_SUCCESS;
}
1002 
1003 #if defined(__arm64__)
1004 static kern_return_t
dump_panic_buffer(struct kdp_core_out_state * outstate,char * panic_buf,size_t panic_len,uint64_t * foffset,uint64_t details_flags)1005 dump_panic_buffer(struct kdp_core_out_state *outstate, char *panic_buf, size_t panic_len,
1006     uint64_t *foffset, uint64_t details_flags)
1007 {
1008 	kern_return_t ret = KERN_SUCCESS;
1009 	bool should_skip = false;
1010 
1011 	kern_coredump_log(NULL, "\nBeginning dump of panic region of size 0x%zx\n", panic_len);
1012 
1013 	ret = kdp_reset_output_vars(outstate, panic_len, true, &should_skip);
1014 	if (KERN_SUCCESS != ret) {
1015 		return ret;
1016 	}
1017 
1018 	if (should_skip) {
1019 		kern_coredump_log(NULL, "Skipping panic region dump\n");
1020 		return ret;
1021 	}
1022 
1023 	uint64_t compressed_panic_region_len = 0;
1024 	ret = kdp_core_output(outstate, panic_len, panic_buf);
1025 	if (KERN_SUCCESS != ret) {
1026 		kern_coredump_log(NULL, "Failed to write panic region to file, kdp_coreoutput(outstate, %zu, %p) returned 0x%x\n",
1027 		    panic_len, panic_buf, ret);
1028 		return ret;
1029 	}
1030 
1031 	ret = kdp_core_output(outstate, 0, NULL);
1032 	if (KERN_SUCCESS != ret) {
1033 		kern_coredump_log(NULL, "Failed to flush panic region data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", outstate, ret);
1034 		return ret;
1035 	}
1036 
1037 	ret = kern_dump_record_file(outstate, "panic_region", *foffset, &compressed_panic_region_len,
1038 	    details_flags);
1039 	if (KERN_SUCCESS != ret) {
1040 		kern_coredump_log(NULL, "Failed to record panic region in corefile header, kern_dump_record_file returned 0x%x\n", ret);
1041 		return ret;
1042 	}
1043 
1044 	kern_coredump_log(NULL, "Recorded panic region in corefile at offset 0x%llx, compressed to %llu bytes\n", *foffset, compressed_panic_region_len);
1045 	*foffset = roundup((*foffset + compressed_panic_region_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1046 
1047 	ret = kern_dump_seek_to_next_file(outstate, *foffset);
1048 	if (KERN_SUCCESS != ret) {
1049 		kern_coredump_log(NULL, "Failed to seek to panic region file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", *foffset, ret);
1050 		return ret;
1051 	}
1052 
1053 	return ret;
1054 }
1055 #endif /* defined(__arm64__) */
1056 
/*
 * Run one complete coredump for the given variant.
 *
 * Builds the output-stage chain, (for disk variants) opens the corefile and
 * reserves space for the header, public keys, and debug logs, dumps the panic
 * region / stackshot / kernel & co-processor cores as applicable, then
 * scrolls back and writes the panic + coredump logs. Finally closes the
 * output and latches success/failure into the panic header.
 *
 * Returns 0 on success, -1 if any step failed (the dump is best-effort:
 * several failures only clear dump_succeeded and continue).
 */
static int
do_kern_dump(enum kern_dump_type kd_variant)
{
	struct kdp_core_out_state outstate = { };
	struct kdp_output_stage *first_stage = NULL;
	char *coredump_log_start = NULL, *buf = NULL;
	size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
	uint64_t foffset = 0;
	kern_return_t ret = KERN_SUCCESS;
	boolean_t output_opened = FALSE, dump_succeeded = TRUE;
	uint64_t details_flags = 0;

	/* Initialize output context */

	bzero(&outstate, sizeof(outstate));
	STAILQ_INIT(&outstate.kcos_out_stage);
	ret = chain_output_stages(kd_variant, &outstate, &details_flags);
	if (KERN_SUCCESS != ret) {
		dump_succeeded = FALSE;
		goto exit;
	}
	first_stage = STAILQ_FIRST(&outstate.kcos_out_stage);

	/*
	 * Record the initial panic log buffer length so we can dump the coredump log
	 * and panic log to disk
	 */
	coredump_log_start = debug_buf_ptr;
#if defined(__arm64__)
	assert(panic_info->eph_other_log_offset != 0);
	assert(panic_info->eph_panic_log_len != 0);
	/* Include any data from before the panic log as well */
	prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
	    panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm64__) */
	/* On x86 there may be no panic log at all (e.g. Debugger() without panic). */
	if (panic_info->mph_panic_log_offset != 0) {
		prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
		    panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
	}
#endif /* defined(__arm64__) */

	assert(prior_debug_logsize <= debug_buf_size);

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		/* Open the file for output */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_WRQ, NULL, 0, NULL)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}
	/* From here on the exit path must send KDP_EOF to close the output. */
	output_opened = true;

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		const size_t aligned_corefile_header_size = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Two reserved slots: current public key and the "next" public key. */
		const size_t aligned_public_key_size = PUBLIC_KEY_RESERVED_LENGTH * 2;
#else
		const size_t aligned_public_key_size = 0;
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;

		/* Space for file header, public key, panic log, core log */
		foffset = roundup(aligned_corefile_header_size + aligned_public_key_size + reserved_debug_logsize, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		kdp_core_header->log_offset = aligned_corefile_header_size + aligned_public_key_size;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Write the public key */
		ret = kern_dump_write_public_key(&outstate);
		if (KERN_SUCCESS != ret) {
			kern_coredump_log(NULL, "(do_kern_dump write public key) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		/* Seek the calculated offset (we'll scrollback later to flush the logs and header) */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}

#if defined(__arm64__)
	flush_mmu_tlb();
#endif

	kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores...\n" :
	    "Transmitting kernel state, please wait:\n");

#if defined (__arm64__)
	/* Dump the panic region (gPanicBase up to the current debug buffer pointer)
	 * as its own file — disk variant only. */
	char *panic_buf = (char *)gPanicBase;
	size_t panic_len = (vm_offset_t)debug_buf_ptr - gPanicBase;
	if (kd_variant == KERN_DUMP_DISK && (panic_buf && panic_len)) {
		ret = dump_panic_buffer(&outstate, panic_buf, panic_len, &foffset, details_flags);
		if (KERN_SUCCESS != ret) {
			dump_succeeded = FALSE;
		}
	}
#endif

#if defined(__x86_64__)
	/* On x86, write the pre-captured panic stackshot as its own corefile entry. */
	if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
		bool should_skip = false;

		kern_coredump_log(NULL, "\nBeginning dump of kernel stackshot\n");

		ret = kdp_reset_output_vars(&outstate, panic_stackshot_len, true, &should_skip);

		if (ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to reset outstate for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
			dump_succeeded = FALSE;
		} else if (!should_skip) {
			uint64_t compressed_stackshot_len = 0;
			/* Write, flush, record in the header, then seek to the next aligned slot. */
			if ((ret = kdp_core_output(&outstate, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outstate, %lu, %p) returned 0x%x\n",
				    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_core_output(&outstate, 0, NULL)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outstate, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kern_dump_record_file(&outstate, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len, details_flags)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
				dump_succeeded = FALSE;
			} else {
				kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
				foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
				if ((ret = kern_dump_seek_to_next_file(&outstate, foffset)) != KERN_SUCCESS) {
					kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
					dump_succeeded = FALSE;
				}
			}
		} else {
			kern_coredump_log(NULL, "Skipping stackshot dump\n");
		}
	}
#endif

	if (kd_variant == KERN_DUMP_DISK) {
		/*
		 * Dump co-processors as well, foffset will be overwritten with the
		 * offset of the next location in the file to be written to.
		 */
		if (kern_do_coredump(&outstate, FALSE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	} else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
		/* Only the kernel */
		if (kern_do_coredump(&outstate, TRUE, foffset, &foffset, details_flags) != 0) {
			dump_succeeded = FALSE;
		}
	}

	/* Scroll back and write the panic + coredump logs into the reserved region. */
	if (kd_variant == KERN_DUMP_DISK) {
		assert(reserved_debug_logsize != 0);
		size_t remaining_debug_logspace = reserved_debug_logsize;

		/* Write the debug log -- first seek to the end of the corefile header */
		foffset = kdp_core_header->log_offset;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/* First flush the data from just the paniclog */
		size_t initial_log_length = 0;
#if defined(__arm64__)
		initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
		    panic_info->eph_panic_log_len;
#else
		if (panic_info->mph_panic_log_offset != 0) {
			initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
			    panic_info->mph_panic_log_len;
		}
#endif

		buf = debug_buf_base;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, initial_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    initial_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		remaining_debug_logspace -= initial_log_length;

		/* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm64__)
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
		/*
		 * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86 if debugger_is_panic was
		 * configured to FALSE based on the boot-args. In that case just start from where the debug buffer was when
		 * we began taking a coredump.
		 */
		if (panic_info->mph_other_log_offset != 0) {
			buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
		} else {
			buf = coredump_log_start;
		}
#endif
		assert(debug_buf_ptr >= buf);

		/* Clamp to the space reserved earlier so we never overrun into the cores. */
		size_t other_log_length = debug_buf_ptr - buf;
		if (other_log_length > remaining_debug_logspace) {
			other_log_length = remaining_debug_logspace;
		}

		/* Write the coredump log */
		if ((ret = first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, other_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    other_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		kdp_core_header->log_length = initial_log_length + other_log_length;
		kern_dump_update_header(&outstate);
	}

exit:
	/* close / last packet */
	if (output_opened && (ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_EOF, NULL, 0, ((void *) 0))) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
		dump_succeeded = FALSE;
	}

	/* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm64__)
	panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
	    EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
	paniclog_flush();
#else
	if (panic_info->mph_panic_log_offset != 0) {
		panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
		    MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
		paniclog_flush();
	}
#endif

	return dump_succeeded ? 0 : -1;
}
1304 
1305 boolean_t
dumped_kernel_core(void)1306 dumped_kernel_core(void)
1307 {
1308 	return kern_dump_successful;
1309 }
1310 
/*
 * Public entry point for taking a kernel coredump.
 *
 * Disk variants are guarded by static latches: once a local dump has
 * succeeded, later requests return 0 immediately; a dump already in
 * progress returns -1. Note that local_dump_in_progress is only cleared
 * on success — presumably intentional so a failed disk dump is not
 * retried (TODO confirm).
 *
 * Returns 0 on success, -1 on failure.
 */
int
kern_dump(enum kern_dump_type kd_variant)
{
	static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
	int ret = -1;
#if KASAN
	/* Disable KASAN checking for the duration of the dump. */
	kasan_kdp_disable();
#endif
	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		if (dumped_local) {
			return 0;
		}
		if (local_dump_in_progress) {
			return -1;
		}
		local_dump_in_progress = TRUE;
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			dumped_local = TRUE;
			kern_dump_successful = TRUE;
			local_dump_in_progress = FALSE;
		}

		return ret;
#if defined(__arm64__)
	} else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
		/* Shared-memory debug dump: no latching, may be retried. */
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
#endif
	} else {
		/* Network (and any remaining) variants. */
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
	}
}
1351 
/*
 * One-time initialization of all coredump output stages.
 *
 * Stages that depend on late-arriving components (disk corefile, LZ4 kext,
 * AEA encryption) are only zeroed here and initialized later; the rest are
 * fully initialized now. Returns the first initialization failure, if any.
 */
static kern_return_t
kdp_core_init_output_stages(void)
{
	kern_return_t ret = KERN_SUCCESS;

	// We only zero-out the disk stage. It will be initialized
	// later on when the corefile is initialized
	bzero(&disk_output_stage, sizeof(disk_output_stage));

	// We only zero-out the LZ4 stage. It will be initialized
	// later on when the kext is loaded.
	bzero(&lz4_output_stage, sizeof(lz4_output_stage));
	lz4_stage_monitor_availability();

	// We only initialize the zlib output stage if we can reach the debugger.
	// This saves us from wasting some wired memory that will never be used
	// in other configurations.
	bzero(&zlib_output_stage, sizeof(zlib_output_stage));
	if (debug_boot_arg && (debug_boot_arg & DB_REBOOT_ALWAYS) == 0) {
		ret = zlib_stage_initialize(&zlib_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}

	// Buffer stage packetizes output into crashdump-packet-sized chunks.
	bzero(&buffer_output_stage, sizeof(buffer_output_stage));
	ret = buffer_stage_initialize(&buffer_output_stage, kdp_crashdump_pkt_size);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&net_output_stage, sizeof(net_output_stage));
	ret = net_stage_initialize(&net_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&progress_notify_output_stage, sizeof(progress_notify_output_stage));
	ret = progress_notify_stage_initialize(&progress_notify_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	// We only zero-out the AEA stage. It will be initialized
	// later on, if it's supported and needed
	bzero(&aea_output_stage, sizeof(aea_output_stage));
	aea_stage_monitor_availability();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm64__)
	// Shared-memory stage only when consistent debug + debugger access are enabled.
	bzero(&shmem_output_stage, sizeof(shmem_output_stage));
	if (PE_consistent_debug_enabled() && PE_i_can_has_debugger(NULL)) {
		ret = shmem_stage_initialize(&shmem_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}
#endif /* defined(__arm64__) */

#if defined(__arm64__)
	bzero(&memory_backing_aware_buffer_output_stage, sizeof(memory_backing_aware_buffer_output_stage));
	ret = memory_backing_aware_buffer_stage_initialize(&memory_backing_aware_buffer_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}
#endif /* defined(__arm64__) */

	return ret;
}
1422 
1423 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1424 
1425 static bool
kern_dump_should_enforce_encryption(void)1426 kern_dump_should_enforce_encryption(void)
1427 {
1428 	static int enforce_encryption = -1;
1429 
1430 	// Only check once
1431 	if (enforce_encryption == -1) {
1432 		uint32_t coredump_encryption_flags = 0;
1433 
1434 		// When set, the boot-arg is the sole decider
1435 		if (!kernel_debugging_restricted() &&
1436 		    PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags))) {
1437 			enforce_encryption = (coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT) != 0 ? 1 : 0;
1438 		} else {
1439 			enforce_encryption = 0;
1440 		}
1441 	}
1442 
1443 	return enforce_encryption != 0;
1444 }
1445 
1446 static bool
kern_dump_is_encryption_available(void)1447 kern_dump_is_encryption_available(void)
1448 {
1449 	// Default to feature enabled unless boot-arg says otherwise
1450 	uint32_t coredump_encryption_flags = COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY;
1451 
1452 	if (!kernel_debugging_restricted()) {
1453 		PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags));
1454 	}
1455 
1456 	if ((coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY) == 0) {
1457 		return false;
1458 	}
1459 
1460 	return aea_stage_is_available();
1461 }
1462 
1463 /*
1464  * Initialize (or de-initialize) the encryption stage. This is done in a way such that if initializing the
1465  * encryption stage with a new key fails, then the existing encryption stage is left untouched. Once
1466  * the new stage is initialized, the old stage is uninitialized.
1467  *
1468  * This function is called whenever we have a new public key (whether from someone calling our sysctl, or because
1469  * we read it out of a corefile), or when encryption becomes available.
1470  *
1471  * Parameters:
1472  *  - public_key:      The public key to use when initializing the encryption stage. Can be NULL to indicate that
1473  *                     the encryption stage should be de-initialized.
1474  *  - public_key_size: The size of the given public key.
1475  */
1476 static kern_return_t
kdp_core_init_encryption_stage(void * public_key,size_t public_key_size)1477 kdp_core_init_encryption_stage(void *public_key, size_t public_key_size)
1478 {
1479 	kern_return_t ret = KERN_SUCCESS;
1480 	struct kdp_output_stage new_encryption_stage = {};
1481 	struct kdp_output_stage old_encryption_stage = {};
1482 
1483 	lck_mtx_assert(kdp_core_encryption_stage_lock, LCK_MTX_ASSERT_OWNED);
1484 
1485 	bzero(&new_encryption_stage, sizeof(new_encryption_stage));
1486 
1487 	if (public_key && kern_dump_is_encryption_available()) {
1488 		ret = aea_stage_initialize(&new_encryption_stage, public_key, public_key_size);
1489 		if (KERN_SUCCESS != ret) {
1490 			printf("(kdp_core_init_encryption_stage) Failed to initialize the encryption stage. Error 0x%x\n", ret);
1491 			return ret;
1492 		}
1493 	}
1494 
1495 	bcopy(&aea_output_stage, &old_encryption_stage, sizeof(aea_output_stage));
1496 
1497 	bcopy(&new_encryption_stage, &aea_output_stage, sizeof(new_encryption_stage));
1498 
1499 	if (old_encryption_stage.kos_initialized && old_encryption_stage.kos_funcs.kosf_free) {
1500 		old_encryption_stage.kos_funcs.kosf_free(&old_encryption_stage);
1501 	}
1502 
1503 	return KERN_SUCCESS;
1504 }
1505 
1506 kern_return_t
kdp_core_handle_new_encryption_key(IOCoreFileAccessCallback access_data,void * access_context,void * recipient_context)1507 kdp_core_handle_new_encryption_key(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context)
1508 {
1509 	kern_return_t ret = KERN_SUCCESS;
1510 	struct kdp_core_encryption_key_descriptor *key_descriptor = (struct kdp_core_encryption_key_descriptor *) recipient_context;
1511 	void *old_public_key = NULL;
1512 	size_t old_public_key_size = 0;
1513 
1514 	if (!key_descriptor) {
1515 		return kIOReturnBadArgument;
1516 	}
1517 
1518 	lck_mtx_lock(kdp_core_encryption_stage_lock);
1519 	kdp_core_is_initializing_encryption_stage = true;
1520 
1521 	do {
1522 		// Do the risky part first, and bail out cleanly if it fails
1523 		ret = kdp_core_init_encryption_stage(key_descriptor->kcekd_key, key_descriptor->kcekd_size);
1524 		if (ret != KERN_SUCCESS) {
1525 			printf("kdp_core_handle_new_encryption_key failed to re-initialize encryption stage. Error 0x%x\n", ret);
1526 			break;
1527 		}
1528 
1529 		// The rest of this function should technically never fail
1530 
1531 		old_public_key = kdp_core_public_key;
1532 		old_public_key_size = kdp_core_header->pub_key_length;
1533 
1534 		kdp_core_public_key = key_descriptor->kcekd_key;
1535 		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1536 		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_EXISTING_COREFILE_KEY_FORMAT_MASK;
1537 		if (key_descriptor->kcekd_key) {
1538 			kdp_core_header->flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1539 			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(key_descriptor->kcekd_format);
1540 			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1541 			kdp_core_header->pub_key_length = key_descriptor->kcekd_size;
1542 		} else {
1543 			kdp_core_header->pub_key_offset = 0;
1544 			kdp_core_header->pub_key_length = 0;
1545 		}
1546 
1547 		/*
1548 		 * Return the old key to the caller to free
1549 		 */
1550 		key_descriptor->kcekd_key = old_public_key;
1551 		key_descriptor->kcekd_size = (uint16_t)old_public_key_size;
1552 
1553 		// If this stuff fails, we have bigger problems
1554 		struct mach_core_fileheader_v2 existing_header;
1555 		bool used_existing_header = false;
1556 		ret = access_data(access_context, FALSE, 0, sizeof(existing_header), &existing_header);
1557 		if (ret != KERN_SUCCESS) {
1558 			printf("kdp_core_handle_new_encryption_key failed to read the existing corefile header. Error 0x%x\n", ret);
1559 			break;
1560 		}
1561 
1562 		if (existing_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
1563 		    && existing_header.version == 2
1564 		    && (existing_header.pub_key_length == 0
1565 		    || kdp_core_header->pub_key_length == 0
1566 		    || existing_header.pub_key_length == kdp_core_header->pub_key_length)) {
1567 			used_existing_header = true;
1568 			existing_header.flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1569 
1570 			if (kdp_core_public_key) {
1571 				existing_header.flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1572 
1573 				if (existing_header.pub_key_offset == 0) {
1574 					existing_header.pub_key_offset = kdp_core_header->pub_key_offset;
1575 					existing_header.pub_key_length = kdp_core_header->pub_key_length;
1576 				}
1577 			}
1578 
1579 			ret = access_data(access_context, TRUE, 0, sizeof(existing_header), &existing_header);
1580 			if (ret != KERN_SUCCESS) {
1581 				printf("kdp_core_handle_new_encryption_key failed to update the existing corefile header. Error 0x%x\n", ret);
1582 				break;
1583 			}
1584 		} else {
1585 			ret = access_data(access_context, TRUE, 0, sizeof(struct mach_core_fileheader_v2), kdp_core_header);
1586 			if (ret != KERN_SUCCESS) {
1587 				printf("kdp_core_handle_new_encryption_key failed to write the corefile header. Error 0x%x\n", ret);
1588 				break;
1589 			}
1590 		}
1591 
1592 		if (kdp_core_header->pub_key_length) {
1593 			uint64_t offset = used_existing_header ? existing_header.pub_key_offset : kdp_core_header->pub_key_offset;
1594 			ret = access_data(access_context, TRUE, offset + PUBLIC_KEY_RESERVED_LENGTH, kdp_core_header->pub_key_length, kdp_core_public_key);
1595 			if (ret != KERN_SUCCESS) {
1596 				printf("kdp_core_handle_new_encryption_key failed to write the next public key. Error 0x%x\n", ret);
1597 				break;
1598 			}
1599 
1600 			if (!used_existing_header) {
1601 				// Everything that happens here is optional. It's not the end of the world if this stuff fails, so we don't return
1602 				// any errors
1603 				// Since we're writing out a completely new header, we make sure to zero-out the region that's reserved for the public key.
1604 				// This allows us consumers of the corefile to know for sure that this corefile is not encrypted (yet). Once we actually
1605 				// write out a corefile, we'll overwrite this region with the key that we ended up using at the time.
1606 				// If we fail to zero-out this region, consumers would read garbage data and properly fail to interpret it as a public key,
1607 				// which is why it is OK for us to fail here (it's hard to interpret garbage data as a valid key, and even then, they wouldn't
1608 				// find a matching private key anyway)
1609 				void *empty_key = NULL;
1610 				kern_return_t temp_ret = KERN_SUCCESS;
1611 
1612 				empty_key = kalloc_data(PUBLIC_KEY_RESERVED_LENGTH,
1613 				    Z_WAITOK | Z_ZERO | Z_NOFAIL);
1614 
1615 				temp_ret = access_data(access_context, TRUE, offset, PUBLIC_KEY_RESERVED_LENGTH, empty_key);
1616 				kfree_data(empty_key, PUBLIC_KEY_RESERVED_LENGTH);
1617 
1618 				if (temp_ret != KERN_SUCCESS) {
1619 					printf("kdp_core_handle_new_encryption_key failed to zero-out the public key region. Error 0x%x\n", temp_ret);
1620 					break;
1621 				}
1622 			}
1623 		}
1624 	} while (0);
1625 
1626 	kdp_core_is_initializing_encryption_stage = false;
1627 	lck_mtx_unlock(kdp_core_encryption_stage_lock);
1628 
1629 	return ret;
1630 }
1631 
1632 kern_return_t
kdp_core_handle_encryption_available(void)1633 kdp_core_handle_encryption_available(void)
1634 {
1635 	kern_return_t ret;
1636 
1637 	lck_mtx_lock(kdp_core_encryption_stage_lock);
1638 	kdp_core_is_initializing_encryption_stage = true;
1639 
1640 	ret = kdp_core_init_encryption_stage(kdp_core_public_key, kdp_core_header->pub_key_length);
1641 
1642 	kdp_core_is_initializing_encryption_stage = false;
1643 	lck_mtx_unlock(kdp_core_encryption_stage_lock);
1644 
1645 	return ret;
1646 }
1647 
1648 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1649 
1650 kern_return_t
kdp_core_handle_lz4_available(void)1651 kdp_core_handle_lz4_available(void)
1652 {
1653 	kern_return_t ret;
1654 	lck_mtx_lock(kdp_core_lz4_stage_lock);
1655 	kdp_core_is_initializing_lz4_stage = true;
1656 
1657 	ret = lz4_stage_initialize(&lz4_output_stage);
1658 
1659 	kdp_core_is_initializing_lz4_stage = false;
1660 	lck_mtx_unlock(kdp_core_lz4_stage_lock);
1661 
1662 	return ret;
1663 }
1664 
1665 kern_return_t
kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data,void * access_context,__unused void * recipient_context)1666 kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, __unused void *recipient_context)
1667 {
1668 	kern_return_t ret = KERN_SUCCESS;
1669 
1670 	lck_mtx_lock(kdp_core_disk_stage_lock);
1671 	kdp_core_is_initializing_disk_stage = true;
1672 
1673 	ret = disk_stage_initialize(&disk_output_stage);
1674 
1675 	kdp_core_is_initializing_disk_stage = false;
1676 	lck_mtx_unlock(kdp_core_disk_stage_lock);
1677 
1678 	if (KERN_SUCCESS != ret) {
1679 		return ret;
1680 	}
1681 
1682 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1683 	// If someone has already provided a new public key,
1684 	// there's no sense in reading the old one from the corefile.
1685 	if (kdp_core_public_key != NULL) {
1686 		return KERN_SUCCESS;
1687 	}
1688 
1689 	// The kernel corefile is now available. Let's try to retrieve the public key from its
1690 	// header (if available and supported).
1691 
1692 	// First let's read the corefile header itself
1693 	struct mach_core_fileheader_v2 temp_header = {};
1694 	ret = access_data(access_context, FALSE, 0, sizeof(temp_header), &temp_header);
1695 	if (KERN_SUCCESS != ret) {
1696 		printf("kdp_core_polled_io_polled_file_available failed to read corefile header. Error 0x%x\n", ret);
1697 		return ret;
1698 	}
1699 
1700 	// Check if the corefile header is initialized, and whether it's initialized to values that we support
1701 	// (for backwards and forwards) compatibility, and check whether the header indicates that the corefile has
1702 	// has a public key stashed inside of it.
1703 	if (temp_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
1704 	    && temp_header.version == 2
1705 	    && temp_header.pub_key_offset != 0
1706 	    && temp_header.pub_key_length != 0
1707 	    /* Future-proofing: make sure it's the key format that we support */
1708 	    && (temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK) == MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256
1709 	    /* Add some extra sanity checks. These are not necessary */
1710 	    && temp_header.pub_key_length <= 4096
1711 	    && temp_header.pub_key_offset < 65535) {
1712 		// The corefile header is properly initialized, is supported, and contains a public key.
1713 		// Let's adopt that public key for our encryption needs
1714 		void *public_key = NULL;
1715 
1716 		public_key = kalloc_data(temp_header.pub_key_length,
1717 		    Z_ZERO | Z_WAITOK | Z_NOFAIL);
1718 
1719 		// Read the public key from the corefile. Note that the key we're trying to adopt is the "next" key, which is
1720 		// PUBLIC_KEY_RESERVED_LENGTH bytes after the public key.
1721 		ret = access_data(access_context, FALSE, temp_header.pub_key_offset + PUBLIC_KEY_RESERVED_LENGTH, temp_header.pub_key_length, public_key);
1722 		if (KERN_SUCCESS != ret) {
1723 			printf("kdp_core_polled_io_polled_file_available failed to read the public key. Error 0x%x\n", ret);
1724 			kfree_data(public_key, temp_header.pub_key_length);
1725 			return ret;
1726 		}
1727 
1728 		lck_mtx_lock(kdp_core_encryption_stage_lock);
1729 		kdp_core_is_initializing_encryption_stage = true;
1730 
1731 		ret = kdp_core_init_encryption_stage(public_key, temp_header.pub_key_length);
1732 		if (KERN_SUCCESS == ret) {
1733 			kdp_core_header->flags |= temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1734 			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(temp_header.flags);
1735 			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1736 			kdp_core_header->pub_key_length = temp_header.pub_key_length;
1737 			kdp_core_public_key = public_key;
1738 		}
1739 
1740 		kdp_core_is_initializing_encryption_stage = false;
1741 		lck_mtx_unlock(kdp_core_encryption_stage_lock);
1742 	}
1743 #else
1744 #pragma unused(access_data, access_context)
1745 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1746 
1747 	return ret;
1748 }
1749 
1750 kern_return_t
kdp_core_polled_io_polled_file_unavailable(void)1751 kdp_core_polled_io_polled_file_unavailable(void)
1752 {
1753 	lck_mtx_lock(kdp_core_disk_stage_lock);
1754 	kdp_core_is_initializing_disk_stage = true;
1755 
1756 	if (disk_output_stage.kos_initialized && disk_output_stage.kos_funcs.kosf_free) {
1757 		disk_output_stage.kos_funcs.kosf_free(&disk_output_stage);
1758 	}
1759 
1760 	kdp_core_is_initializing_disk_stage = false;
1761 	lck_mtx_unlock(kdp_core_disk_stage_lock);
1762 
1763 	return KERN_SUCCESS;
1764 }
1765 
/*
 * One-time boot initialization for kernel coredump support: sets up the
 * output stages, allocates the permanent corefile header, creates the
 * stage-initialization locks, and registers the xnu coredump callbacks.
 */
void
kdp_core_init(void)
{
	kern_return_t kr;
	kern_coredump_callback_config core_config = { };

	/* Initialize output stages */
	kr = kdp_core_init_output_stages();
	assert(KERN_SUCCESS == kr);

	/*
	 * Allocate the corefile header up front (KMA_PERMANENT: never freed;
	 * KMA_NOFAIL: cannot fail; KMA_ZERO leaves all optional fields 0).
	 */
	kmem_alloc(kernel_map, (vm_offset_t*)&kdp_core_header,
	    kdp_core_header_size,
	    KMA_NOFAIL | KMA_ZERO | KMA_PERMANENT | KMA_KOBJECT | KMA_DATA,
	    VM_KERN_MEMORY_DIAG);

	/* Stamp the header as a v2 mach core fileheader. */
	kdp_core_header->signature = MACH_CORE_FILEHEADER_V2_SIGNATURE;
	kdp_core_header->version = 2;

	/* Locks guarding (re)initialization of the individual output stages. */
	kdp_core_initialization_lock_group = lck_grp_alloc_init("KDPCoreStageInit", LCK_GRP_ATTR_NULL);
	kdp_core_disk_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	kdp_core_encryption_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

	/* Latch the encryption-enforcement policy early; result not needed here. */
	(void) kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

	kdp_core_lz4_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

	/* Register the xnu-side callbacks used to produce the kernel coredump. */
	core_config.kcc_coredump_init = kern_dump_init;
	core_config.kcc_coredump_get_summary = kern_dump_save_summary;
	core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
	core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
	core_config.kcc_coredump_save_sw_vers_detail = kern_dump_save_sw_vers_detail;
	core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
	core_config.kcc_coredump_save_note_summary = kern_dump_save_note_summary;
	core_config.kcc_coredump_save_note_descriptions = kern_dump_save_note_descriptions;
	core_config.kcc_coredump_save_note_data = kern_dump_save_note_data;

	kr = kern_register_xnu_coredump_helper(&core_config);
	assert(KERN_SUCCESS == kr);
}
1808 
1809 /*
1810  * Additional LC_NOTES added to the core.
1811  */
1812 
1813 static kern_return_t
kern_dump_save_note_summary(void * refcon __unused,core_save_note_summary_cb callback,void * context)1814 kern_dump_save_note_summary(void *refcon __unused, core_save_note_summary_cb callback, void *context)
1815 {
1816 	int count = 1;
1817 	size_t size = sizeof(addrable_bits_note_t);
1818 
1819 
1820 	return callback(count, size, context);
1821 }
1822 
1823 static kern_return_t
kern_dump_save_note_descriptions(void * refcon __unused,core_save_note_descriptions_cb callback,void * context)1824 kern_dump_save_note_descriptions(void *refcon __unused, core_save_note_descriptions_cb callback, void *context)
1825 {
1826 	int max_ret = KERN_SUCCESS;
1827 	int ret;
1828 
1829 	max_ret = ret = callback(ADDRABLE_BITS_DATA_OWNER, sizeof(addrable_bits_note_t), context);
1830 
1831 
1832 	return max_ret;
1833 }
1834 
1835 static kern_return_t
kern_dump_save_note_data(void * refcon __unused,core_save_note_data_cb callback,void * context)1836 kern_dump_save_note_data(void *refcon __unused, core_save_note_data_cb callback, void *context)
1837 {
1838 	int max_ret = KERN_SUCCESS;
1839 	int ret;
1840 
1841 	addrable_bits_note_t note = {
1842 		.version = ADDRABLE_BITS_VER,
1843 		.addressing_bits = pmap_kernel_va_bits(),
1844 		.unused = 0
1845 	};
1846 
1847 	max_ret = ret = callback(&note, sizeof(addrable_bits_note_t), context);
1848 
1849 
1850 	return max_ret;
1851 }
1852 
1853 #else
1854 
/* No-op stub: region exclusion only applies when CONFIG_KDP_INTERACTIVE_DEBUGGING is built. */
void
kdp_core_exclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
}
1859 
/* No-op stub: region un-exclusion only applies when CONFIG_KDP_INTERACTIVE_DEBUGGING is built. */
void
kdp_core_unexclude_region(__unused vm_offset_t addr, __unused vm_size_t size)
{
}
1864 
1865 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1866