/*
 * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * The main orchestrator for kernel (and co-processor) coredumps. Here's a very simplified view of
 * the flow:
 *
 * At kernel initialization time (kdp_core_init):
 * ----------------------------------------------
 *
 * - kdp_core_init() takes care of allocating all necessary data structures and initializes the
 *   coredump output stages
 *
 * At coredump time (do_kern_dump):
 * --------------------------------
 *
 * - Depending on the coredump variant, we chain the necessary output stages together in chain_output_stages()
 * - [Disk only] We initialize the corefile header
 * - [Disk only] We stream the stackshot out through the output stages and update the corefile header
 * - We perform the kernel coredump, streaming it out through the output stages
 * - [Disk only] We update the corefile header
 * - [Disk only] We perform the co-processor coredumps (driven by kern_do_coredump), streaming each out
 *               through the output stages and updating the corefile header.
 * - [Disk only] We save the coredump log to the corefile
 */

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <kern/locks.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/output_stages/output_stages.h>
#include <kdp/processor_core.h>
#include <IOKit/IOTypes.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

kern_return_t kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context);
kern_return_t kdp_core_polled_io_polled_file_unavailable(void);

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static struct kdp_output_stage disk_output_stage = {};
static struct kdp_output_stage zlib_output_stage = {};
static struct kdp_output_stage buffer_output_stage = {};
static struct kdp_output_stage net_output_stage = {};
static struct kdp_output_stage progress_notify_output_stage = {};
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static struct kdp_output_stage aea_output_stage = {};
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
#if defined(__arm__) || defined(__arm64__)
static struct kdp_output_stage shmem_output_stage = {};
#endif /* defined(__arm__) || defined(__arm64__) */

extern uint32_t kdp_crashdump_pkt_size;

static boolean_t kern_dump_successful = FALSE;

static const size_t kdp_core_header_size = sizeof(struct mach_core_fileheader_v2) + (KERN_COREDUMP_MAX_CORES * sizeof(struct mach_core_details_v2));
static struct mach_core_fileheader_v2 *kdp_core_header = NULL;

static lck_grp_t *kdp_core_initialization_lock_group = NULL;
static lck_mtx_t *kdp_core_disk_stage_lock = NULL;
static bool kdp_core_is_initializing_disk_stage = false;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static const size_t PUBLIC_KEY_RESERVED_LENGTH = roundup(4096, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
static void *kdp_core_public_key = NULL;
static lck_mtx_t *kdp_core_encryption_stage_lock = NULL;
static bool kdp_core_is_initializing_encryption_stage = false;

static bool kern_dump_should_enforce_encryption(void);
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

#define COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY (1 << 0)
#define COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT  (1 << 1)

boolean_t
kdp_has_polled_corefile(void)
{
	return NULL != gIOPolledCoreFileVars;
}

kern_return_t
kdp_polled_corefile_error(void)
{
	return gIOPolledCoreFileOpenRet;
}

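/*
 * Feed one chunk of coredump data to the first output stage in the chain.
 * A call with data == NULL and length == 0 marks the end of the stream.
 * Progress is logged in increments of at least 10% of kcos_totalbytes, and
 * the first error reported by a stage is latched in kcos_error so that
 * subsequent chunks for this core are dropped.
 */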
kern_return_t
kdp_core_output(void *kdp_core_out_state, uint64_t length, void * data)
{
	kern_return_t              err = KERN_SUCCESS;
	uint64_t                   percent;
	struct kdp_core_out_state *vars = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage   *first_stage = STAILQ_FIRST(&vars->kcos_out_stage);

	if (vars->kcos_error == KERN_SUCCESS) {
		if ((err = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, length, data)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(kdp_core_output) outproc(KDP_DATA, NULL, 0x%llx, %p) returned 0x%x\n",
			    length, data, err);
			vars->kcos_error = err;
		}
		if (!data && !length) {
			kern_coredump_log(NULL, "100..");
		} else {
			vars->kcos_bytes_written += length;
			percent = (vars->kcos_bytes_written * 100) / vars->kcos_totalbytes;
			if ((percent - vars->kcos_lastpercent) >= 10) {
				vars->kcos_lastpercent = percent;
				kern_coredump_log(NULL, "%lld..\n", percent);
			}
		}
	}
	return err;
}

#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t   c_buffers_size;

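/*
 * Test whether a kernel virtual address falls inside the working memory of a
 * coredump output stage. Such ranges are excluded from the dump; on a match,
 * *vincr is set to the number of bytes from vaddr to the end of that stage's
 * buffer so the caller can skip the whole range.
 */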
static bool
kernel_vaddr_in_coredump_stage(const struct kdp_output_stage *stage, uint64_t vaddr, uint64_t *vincr)
{
	uint64_t start_addr = (uint64_t)stage->kos_data;
	uint64_t end_addr = start_addr + stage->kos_data_size;

	if (!stage->kos_data) {
		return false;
	}

	if (vaddr >= start_addr && vaddr < end_addr) {
		*vincr = stage->kos_data_size - (vaddr - start_addr);
		return true;
	}

	return false;
}

static bool
kernel_vaddr_in_coredump_stages(uint64_t vaddr, uint64_t *vincr)
{
	if (kernel_vaddr_in_coredump_stage(&disk_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&zlib_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&buffer_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&net_output_stage, vaddr, vincr)) {
		return true;
	}

	if (kernel_vaddr_in_coredump_stage(&progress_notify_output_stage, vaddr, vincr)) {
		return true;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	if (kernel_vaddr_in_coredump_stage(&aea_output_stage, vaddr, vincr)) {
		return true;
	}
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm__) || defined(__arm64__)
	if (kernel_vaddr_in_coredump_stage(&shmem_output_stage, vaddr, vincr)) {
		return true;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	return false;
}

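/*
 * Return the physical page number backing a page-aligned kernel virtual
 * address, or 0 if the address is unmapped or lies in a region that is
 * deliberately excluded from the dump (compressor buffers, output-stage
 * working memory, the coredump ramdisk, and the physical memory map on ARM).
 * *pvincr receives the page-rounded number of bytes to advance by. When
 * pvphysaddr is non-NULL the lookup may fault, and the physmap address of
 * the page is returned through it; pages outside the physmap are treated as
 * absent.
 */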
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
	ppnum_t ppn = 0;
	uint64_t vincr = PAGE_SIZE_64;

	assert(!(vaddr & PAGE_MASK_64));

	/* VA ranges to exclude */
	if (vaddr == c_buffers) {
		/* compressor data */
		ppn = 0;
		vincr = c_buffers_size;
	} else if (kernel_vaddr_in_coredump_stages(vaddr, &vincr)) {
		/* coredump output stage working memory */
		ppn = 0;
	} else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
		ppn = 0;
		vincr = kdp_core_ramdisk_size;
	} else
#if defined(__arm__) || defined(__arm64__)
	if (vaddr == phystokv(avail_start)) {
		/* physical memory map */
		ppn = 0;
		vincr = (avail_end - avail_start);
	} else
#endif /* defined(__arm__) || defined(__arm64__) */
	{
		ppn = (pvphysaddr != NULL ?
		    pmap_find_phys(kernel_pmap, vaddr) :
		    pmap_find_phys_nofault(kernel_pmap, vaddr));
	}

	*pvincr = round_page_64(vincr);

	if (ppn && pvphysaddr) {
		uint64_t phys = ptoa_64(ppn);
		if (physmap_enclosed(phys)) {
			*pvphysaddr = phystokv(phys);
		} else {
			ppn = 0;
		}
	}

	return ppn;
}

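/*
 * Walk the kernel virtual address range [start, end) and invoke the callback
 * once per maximal run of present, dumpable mappings. On ARM the pages
 * backing the pmap's page tables are pulled in through the physmap as a
 * special case; on x86 unmapped 2MB-aligned gaps are skipped a page
 * directory at a time. Mappings that do not correspond to dumpable memory
 * (and the phys carveout, unless explicitly allowed) are treated as holes.
 * With KASAN, the shadow mappings are traversed at the end.
 */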
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
	IOReturn        ret;
	vm_map_offset_t vcurstart, vcur;
	uint64_t        vincr = 0;
	vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
	vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
	vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
	vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

	boolean_t       lastvavalid;
#if defined(__arm__) || defined(__arm64__)
	vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
	assert(!is_ept_pmap(pmap));
#endif

	/* Assumes pmap is locked, or being called from the kernel debugger */

	if (start > end) {
		return KERN_INVALID_ARGUMENT;
	}

	ret = KERN_SUCCESS;
	lastvavalid = FALSE;
	for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
		ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
		/* We're at the start of the physmap, so pull out the pagetable pages that
		 * are accessed through that region.*/
		if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
			m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
		}

		if (m != VM_PAGE_NULL) {
			vm_map_offset_t vprev = vcur;
			ppn = (ppnum_t)atop(avail_end);
			while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
				/* Ignore pages that come from the static region and have already been dumped.*/
				if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
					ppn = VM_PAGE_GET_PHYS_PAGE(m);
					break;
				}
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
			}
			vincr = PAGE_SIZE_64;
			if (ppn == atop(avail_end)) {
				vm_object_unlock(&pmap_object_store);
				m = VM_PAGE_NULL;
				// avail_end is not a valid physical address,
				// so phystokv(avail_end) may not produce the expected result.
				vcur = phystokv(avail_start) + (avail_end - avail_start);
			} else {
				m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
				vcur = phystokv(ptoa(ppn));
			}
			if (vcur != vprev) {
				ret = callback(vcurstart, vprev, context);
				lastvavalid = FALSE;
			}
		}
		if (m == VM_PAGE_NULL) {
			ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
		}
#else /* defined(__arm__) || defined(__arm64__) */
		ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
		if (ppn != 0) {
			if (((vcur < debug_start) || (vcur >= debug_end))
			    && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
			    // include the macOS panic region if it's mapped
			    && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
			    ) {
				/* not something we want */
				ppn = 0;
			}
			/* include the phys carveout only if explicitly marked */
			if ((debug_is_in_phys_carveout(vcur) || debug_is_in_phys_carveout_metadata(vcur)) &&
			    !debug_can_coredump_phys_carveout()) {
				ppn = 0;
			}
		}

		if (ppn != 0) {
			if (!lastvavalid) {
				/* Start of a new virtual region */
				vcurstart = vcur;
				lastvavalid = TRUE;
			}
		} else {
			if (lastvavalid) {
				/* end of a virtual region */
				ret = callback(vcurstart, vcur, context);
				lastvavalid = FALSE;
			}

#if defined(__x86_64__)
			/* Try to skip by 2MB if possible */
			if ((vcur & PDMASK) == 0) {
				pd_entry_t *pde;
				pde = pmap_pde(pmap, vcur);
				if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
					/* Make sure we wouldn't overflow */
					if (vcur < (end - NBPD)) {
						vincr = NBPD;
					}
				}
			}
#endif /* defined(__x86_64__) */
		}
		vcur += vincr;
	}

	if ((ret == KERN_SUCCESS) && lastvavalid) {
		/* send previous run */
		ret = callback(vcurstart, vcur, context);
	}

#if KASAN
	if (ret == KERN_SUCCESS) {
		ret = kasan_traverse_mappings(callback, context);
	}
#endif

	return ret;
}

struct kern_dump_preflight_context {
	uint32_t region_count;
	uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
	IOReturn ret = KERN_SUCCESS;

	kdc->region_count++;
	kdc->dumpable_bytes += (end - start);

	return ret;
}


struct kern_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
	uint64_t seg_start = (uint64_t) start;
	uint64_t seg_end = (uint64_t) end;

	return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
	struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

	return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}

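/*
 * The kern_dump_save_* functions below implement the kern_coredump callbacks
 * for the kernel itself: they report the number of dumpable regions and
 * bytes, emit the segment descriptions, the thread state, the software
 * version details, and finally the segment data, all by traversing the
 * kernel pmap.
 */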
static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
	struct kern_dump_preflight_context kdc_preflight = { };
	uint64_t thread_state_size = 0, thread_count = 0;
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS,
	    kern_dump_pmap_traverse_preflight_callback,
	    &kdc_preflight);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
		return ret;
	}

	kern_collectth_state_size(&thread_count, &thread_state_size);

	ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
	    thread_count, thread_state_size, 0, context);
	return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;
	struct kern_dump_send_seg_desc_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS,
	    kern_dump_pmap_traverse_send_segdesc_callback,
	    &kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
		return ret;
	}

	return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
	kern_return_t ret;
	uint64_t thread_state_size = 0, thread_count = 0;

	kern_collectth_state_size(&thread_count, &thread_state_size);

	if (thread_state_size > 0) {
		void * iter = NULL;
		do {
			kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

			ret = callback(buf, context);
			if (ret != KERN_SUCCESS) {
				return ret;
			}
		} while (iter);
	}

	return KERN_SUCCESS;
}


static int
kern_dump_save_sw_vers_detail(__unused void *refcon, core_save_sw_vers_detail_cb callback, void *context)
{
	return callback(vm_kernel_stext, kernel_uuid, 0, context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
	vm_map_offset_t vstart = kdp_core_start_addr();
	kern_return_t ret;
	struct kern_dump_send_segdata_context kds_context;

	kds_context.callback = callback;
	kds_context.context = context;

	ret = pmap_traverse_present_mappings(kernel_pmap,
	    vstart,
	    VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
		return ret;
	}

	return KERN_SUCCESS;
}

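/*
 * Reset the shared output state and every chained output stage before a new
 * core is written. When the core must be encrypted but no encryption stage
 * is available and enforcement is on, the caller is told to skip this core
 * and the panic header is flagged; when encryption is not requested, any
 * encryption stage present is bypassed.
 */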
kern_return_t
kdp_reset_output_vars(void *kdp_core_out_state, uint64_t totalbytes, bool encrypt_core, bool *out_should_skip_coredump)
{
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *current_stage = NULL;

	/* Re-initialize kdp_outstate */
	outstate->kcos_totalbytes = totalbytes;
	outstate->kcos_bytes_written = 0;
	outstate->kcos_lastpercent = 0;
	outstate->kcos_error = KERN_SUCCESS;

	/* Reset the output stages */
	STAILQ_FOREACH(current_stage, &outstate->kcos_out_stage, kos_next) {
		current_stage->kos_funcs.kosf_reset(current_stage);
	}

	*out_should_skip_coredump = false;
	if (encrypt_core) {
		if (outstate->kcos_enforce_encryption && !outstate->kcos_encryption_stage) {
			*out_should_skip_coredump = true;
#if defined(__arm__) || defined(__arm64__)
			panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#else
			panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#endif
			kern_coredump_log(NULL, "(kdp_reset_output_vars) Encryption requested, is unavailable, and enforcement is active. Skipping current core.\n");
		}
	} else if (outstate->kcos_encryption_stage) {
		outstate->kcos_encryption_stage->kos_bypass = true;
	}

	return KERN_SUCCESS;
}

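/*
 * Seek back to offset 0 and rewrite the corefile header through the first
 * output stage, flushing the data (with an explicit KDP_FLUSH on ARM).
 */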
static kern_return_t
kern_dump_update_header(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	kern_return_t ret;

	/* Write the file header -- first seek to the beginning of the file */
	foffset = 0;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header_size, kdp_core_header)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
		    kdp_core_header_size, kdp_core_header, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm__) || defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	return ret;
}

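/*
 * Record a completed core (kernel or co-processor) in the corefile header:
 * its offset, the length written by the final output stage, whether it was
 * encrypted, and its name, then rewrite the header on disk.
 */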
kern_return_t
kern_dump_record_file(void *kdp_core_out_state, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
	kern_return_t ret = KERN_SUCCESS;
	uint64_t bytes_written = 0;
	struct mach_core_details_v2 *core_details = NULL;
	struct kdp_output_stage *last_stage;
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;

	assert(kdp_core_header->num_files < KERN_COREDUMP_MAX_CORES);
	assert(out_file_length != NULL);
	*out_file_length = 0;

	last_stage = STAILQ_LAST(&outstate->kcos_out_stage, kdp_output_stage, kos_next);
	bytes_written = last_stage->kos_bytes_written;

	core_details = &(kdp_core_header->files[kdp_core_header->num_files]);
	core_details->flags = MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
	if (outstate->kcos_encryption_stage && outstate->kcos_encryption_stage->kos_bypass == false) {
		core_details->flags |= MACH_CORE_DETAILS_V2_FLAG_ENCRYPTED_AEA;
	}
	core_details->offset = file_offset;
	core_details->length = bytes_written;
	strncpy((char *)&core_details->core_name, filename,
	    MACH_CORE_FILEHEADER_NAMELEN);
	core_details->core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';

	kdp_core_header->num_files++;

	ret = kern_dump_update_header(outstate);
	if (ret == KERN_SUCCESS) {
		*out_file_length = bytes_written;
	}

	return ret;
}

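/* Seek the output stream to the offset at which the next core will be written. */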
kern_return_t
kern_dump_seek_to_next_file(void *kdp_core_out_state, uint64_t next_file_offset)
{
	struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	kern_return_t ret;

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
	}

	return ret;
}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION

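/*
 * Write the current public key into the region reserved for it in the
 * corefile: the key is written twice (once for the active-key slot and once
 * for the "next" key slot), each zero-padded to PUBLIC_KEY_RESERVED_LENGTH,
 * and then flushed.
 */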
static kern_return_t
kern_dump_write_public_key(struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
	uint64_t foffset;
	uint64_t remainder = PUBLIC_KEY_RESERVED_LENGTH - kdp_core_header->pub_key_length;
	kern_return_t ret;

	if (kdp_core_header->pub_key_offset == 0 || kdp_core_header->pub_key_length == 0) {
		// Nothing to do
		return KERN_SUCCESS;
	}

	/* Write the public key -- first seek to the appropriate offset */
	foffset = kdp_core_header->pub_key_offset;
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
		    sizeof(foffset), &foffset, foffset, ret);
		return ret;
	}

	// Write the public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	// Fill out the remainder of the block with zeroes
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	// Do it once more to write the "next" public key
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
		    kdp_core_header->pub_key_length, kdp_core_public_key, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
		    remainder, ret);
		return ret;
	}

	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc data flush returned 0x%x\n", ret);
		return ret;
	}

#if defined(__arm__) || defined(__arm64__)
	if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc explicit flush returned 0x%x\n", ret);
		return ret;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	return ret;
}

#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

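/*
 * Build the chain of output stages for the requested dump variant:
 *   disk:  [zlib] -> progress_notify -> [aea] -> disk
 *   net:   [zlib] -> progress_notify -> buffer -> net
 *   shmem: [zlib] -> shmem    (ARM hardware shared-memory debugging)
 * Fails for disk dumps when the corefile (disk stage) has not been
 * initialized yet or is still being initialized.
 */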
static kern_return_t
chain_output_stages(enum kern_dump_type kd_variant, struct kdp_core_out_state *outstate)
{
	struct kdp_output_stage *current = NULL;

	switch (kd_variant) {
	case KERN_DUMP_STACKSHOT_DISK:
		OS_FALLTHROUGH;
	case KERN_DUMP_DISK:
		if (!kdp_corezip_disabled) {
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		if (kdp_core_is_initializing_encryption_stage) {
			kern_coredump_log(NULL, "We were in the middle of initializing encryption. Marking it as unavailable\n");
		} else if (aea_output_stage.kos_initialized) {
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &aea_output_stage, kos_next);
			outstate->kcos_encryption_stage = &aea_output_stage;
		}
		outstate->kcos_enforce_encryption = kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
		if (kdp_core_is_initializing_disk_stage) {
			kern_coredump_log(NULL, "We were in the middle of initializing the disk stage. Cannot write a coredump to disk\n");
			return KERN_FAILURE;
		} else if (disk_output_stage.kos_initialized == false) {
			kern_coredump_log(NULL, "Corefile is not yet initialized. Cannot write a coredump to disk\n");
			return KERN_FAILURE;
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &disk_output_stage, kos_next);
		break;
	case KERN_DUMP_NET:
		if (!kdp_corezip_disabled) {
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &buffer_output_stage, kos_next);
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &net_output_stage, kos_next);
		break;
#if defined(__arm__) || defined(__arm64__)
	case KERN_DUMP_HW_SHMEM_DBG:
		if (!kdp_corezip_disabled) {
			STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
		}
		STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &shmem_output_stage, kos_next);
		break;
#endif /* defined(__arm__) || defined(__arm64__) */
	}

	STAILQ_FOREACH(current, &outstate->kcos_out_stage, kos_next) {
		current->kos_outstate = outstate;
	}

	return KERN_SUCCESS;
}

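/*
 * Worker for a single dump attempt. Chains the output stages and, for disk
 * dumps, lays out the corefile (header, public key region, reserved log
 * space), streams the panic stackshot on x86, runs the kernel and
 * co-processor coredumps through kern_do_coredump, then writes the panic log
 * and coredump log into their reserved space, updates the corefile header,
 * and flags the panic header with the outcome.
 */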
static int
do_kern_dump(enum kern_dump_type kd_variant)
{
	struct kdp_core_out_state outstate = { };
	struct kdp_output_stage *first_stage = NULL;
	char *coredump_log_start = NULL, *buf = NULL;
	size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
	uint64_t foffset = 0;
	kern_return_t ret = KERN_SUCCESS;
	boolean_t output_opened = FALSE, dump_succeeded = TRUE;

	/* Initialize output context */

	bzero(&outstate, sizeof(outstate));
	STAILQ_INIT(&outstate.kcos_out_stage);
	ret = chain_output_stages(kd_variant, &outstate);
	if (KERN_SUCCESS != ret) {
		dump_succeeded = FALSE;
		goto exit;
	}
	first_stage = STAILQ_FIRST(&outstate.kcos_out_stage);

	/*
	 * Record the initial panic log buffer length so we can dump the coredump log
	 * and panic log to disk
	 */
	coredump_log_start = debug_buf_ptr;
#if defined(__arm__) || defined(__arm64__)
	assert(panic_info->eph_other_log_offset != 0);
	assert(panic_info->eph_panic_log_len != 0);
	/* Include any data from before the panic log as well */
	prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
	    panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm__) || defined(__arm64__) */
	if (panic_info->mph_panic_log_offset != 0) {
		prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
		    panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	assert(prior_debug_logsize <= debug_buf_size);

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		/* Open the file for output */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_WRQ, NULL, 0, NULL)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}
	output_opened = true;

	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		const size_t aligned_corefile_header_size = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		const size_t aligned_public_key_size = PUBLIC_KEY_RESERVED_LENGTH * 2;
#else
		const size_t aligned_public_key_size = 0;
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;

		/* Space for file header, public key, panic log, core log */
		foffset = roundup(aligned_corefile_header_size + aligned_public_key_size + reserved_debug_logsize, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
		kdp_core_header->log_offset = aligned_corefile_header_size + aligned_public_key_size;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
		/* Write the public key */
		ret = kern_dump_write_public_key(&outstate);
		if (KERN_SUCCESS != ret) {
			kern_coredump_log(NULL, "(do_kern_dump write public key) returned 0x%x\n", ret);
			dump_succeeded = FALSE;
			goto exit;
		}
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

		/* Seek the calculated offset (we'll scroll back later to flush the logs and header) */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}
	}

#if defined(__arm__) || defined(__arm64__)
	flush_mmu_tlb();
#endif

	kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores...\n" :
	    "Transmitting kernel state, please wait:\n");


#if defined(__x86_64__)
	if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
		bool should_skip = false;

		kern_coredump_log(NULL, "\nBeginning dump of kernel stackshot\n");

		ret = kdp_reset_output_vars(&outstate, panic_stackshot_len, true, &should_skip);

		if (ret != KERN_SUCCESS) {
			kern_coredump_log(NULL, "Failed to reset outstate for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
			dump_succeeded = FALSE;
		} else if (!should_skip) {
			uint64_t compressed_stackshot_len = 0;
			if ((ret = kdp_core_output(&outstate, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outstate, %lu, %p) returned 0x%x\n",
				    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kdp_core_output(&outstate, 0, NULL)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outstate, ret);
				dump_succeeded = FALSE;
			} else if ((ret = kern_dump_record_file(&outstate, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
				kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
				dump_succeeded = FALSE;
			} else {
				kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
				foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
				if ((ret = kern_dump_seek_to_next_file(&outstate, foffset)) != KERN_SUCCESS) {
					kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
					dump_succeeded = FALSE;
				}
			}
		} else {
			kern_coredump_log(NULL, "Skipping stackshot dump\n");
		}
	}
#endif

	if (kd_variant == KERN_DUMP_DISK) {
		/*
		 * Dump co-processors as well, foffset will be overwritten with the
		 * offset of the next location in the file to be written to.
		 */
		if (kern_do_coredump(&outstate, FALSE, foffset, &foffset) != 0) {
			dump_succeeded = FALSE;
		}
	} else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
		/* Only the kernel */
		if (kern_do_coredump(&outstate, TRUE, foffset, &foffset) != 0) {
			dump_succeeded = FALSE;
		}
	}

	if (kd_variant == KERN_DUMP_DISK) {
		assert(reserved_debug_logsize != 0);
		size_t remaining_debug_logspace = reserved_debug_logsize;

		/* Write the debug log -- first seek to the end of the corefile header */
		foffset = kdp_core_header->log_offset;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
			    sizeof(foffset), &foffset, foffset, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		/* First flush the data from just the paniclog */
		size_t initial_log_length = 0;
#if defined(__arm__) || defined(__arm64__)
		initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
		    panic_info->eph_panic_log_len;
#else
		if (panic_info->mph_panic_log_offset != 0) {
			initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
			    panic_info->mph_panic_log_len;
		}
#endif

		buf = debug_buf_base;
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, initial_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    initial_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		remaining_debug_logspace -= initial_log_length;

		/* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm__) || defined(__arm64__)
		buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
		/*
		 * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86 and debugger_is_panic was
		 * configured to FALSE based on the boot-args. In that case just start from where the debug buffer was when
		 * we began taking a coredump.
		 */
		if (panic_info->mph_other_log_offset != 0) {
			buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
		} else {
			buf = coredump_log_start;
		}
#endif
		assert(debug_buf_ptr >= buf);

		size_t other_log_length = debug_buf_ptr - buf;
		if (other_log_length > remaining_debug_logspace) {
			other_log_length = remaining_debug_logspace;
		}

		/* Write the coredump log */
		if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, other_log_length, buf)) != KERN_SUCCESS) {
			kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
			    other_log_length, buf, ret);
			dump_succeeded = FALSE;
			goto exit;
		}

		kdp_core_header->log_length = initial_log_length + other_log_length;
		kern_dump_update_header(&outstate);
	}

exit:
	/* close / last packet */
	if (output_opened && (ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_EOF, NULL, 0, ((void *) 0))) != KERN_SUCCESS) {
		kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
		dump_succeeded = FALSE;
	}

	/* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm__) || defined(__arm64__)
	panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
	    EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
	paniclog_flush();
#else
	if (panic_info->mph_panic_log_offset != 0) {
		panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
		    MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
		paniclog_flush();
	}
#endif

	return dump_succeeded ? 0 : -1;
}

boolean_t
dumped_kernel_core(void)
{
	return kern_dump_successful;
}

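/*
 * External entry point for taking a coredump. Disk dumps are attempted at
 * most once: subsequent calls return immediately if a dump already succeeded
 * or is still marked in progress. Success of any variant is recorded in
 * kern_dump_successful.
 */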
int
kern_dump(enum kern_dump_type kd_variant)
{
	static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
	int ret = -1;
#if KASAN
	kasan_kdp_disable();
#endif
	if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
		if (dumped_local) {
			return 0;
		}
		if (local_dump_in_progress) {
			return -1;
		}
		local_dump_in_progress = TRUE;
#if defined(__arm__) || defined(__arm64__)
		shmem_mark_as_busy();
#endif
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			dumped_local = TRUE;
			kern_dump_successful = TRUE;
			local_dump_in_progress = FALSE;
		}

		return ret;
#if defined(__arm__) || defined(__arm64__)
	} else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
#endif
	} else {
		ret = do_kern_dump(kd_variant);
		if (ret == 0) {
			kern_dump_successful = TRUE;
		}
		return ret;
	}
}

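/*
 * One-time setup of the statically allocated output stages. The disk stage
 * is only zeroed here (it is initialized when the corefile becomes
 * available); the AEA stage is zeroed and availability monitoring is started
 * (it is initialized once a public key is known); the zlib, buffer, net,
 * progress-notify and, on ARM with consistent debug enabled, shmem stages
 * are fully initialized.
 */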
static kern_return_t
kdp_core_init_output_stages(void)
{
	kern_return_t ret = KERN_SUCCESS;

	// We only zero-out the disk stage. It will be initialized
	// later on when the corefile is initialized
	bzero(&disk_output_stage, sizeof(disk_output_stage));

	bzero(&zlib_output_stage, sizeof(zlib_output_stage));
	ret = zlib_stage_initialize(&zlib_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&buffer_output_stage, sizeof(buffer_output_stage));
	ret = buffer_stage_initialize(&buffer_output_stage, kdp_crashdump_pkt_size);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&net_output_stage, sizeof(net_output_stage));
	ret = net_stage_initialize(&net_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

	bzero(&progress_notify_output_stage, sizeof(progress_notify_output_stage));
	ret = progress_notify_stage_initialize(&progress_notify_output_stage);
	if (KERN_SUCCESS != ret) {
		return ret;
	}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
	// We only zero-out the AEA stage. It will be initialized
	// later on, if it's supported and needed
	bzero(&aea_output_stage, sizeof(aea_output_stage));
	aea_stage_monitor_availability();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm__) || defined(__arm64__)
	bzero(&shmem_output_stage, sizeof(shmem_output_stage));
	if (PE_consistent_debug_enabled() && PE_i_can_has_debugger(NULL)) {
		ret = shmem_stage_initialize(&shmem_output_stage);
		if (KERN_SUCCESS != ret) {
			return ret;
		}
	}
#endif /* defined(__arm__) || defined(__arm64__) */

	return ret;
}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION

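/*
 * Whether a core that cannot be encrypted should be skipped rather than
 * written in the clear. Decided once, solely from the coredump_encryption
 * boot-arg (and only when kernel debugging is not restricted).
 */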
static bool
kern_dump_should_enforce_encryption(void)
{
	static int enforce_encryption = -1;

	// Only check once
	if (enforce_encryption == -1) {
		uint32_t coredump_encryption_flags = 0;

		// When set, the boot-arg is the sole decider
		if (!kernel_debugging_restricted() &&
		    PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags))) {
			enforce_encryption = (coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT) != 0 ? 1 : 0;
		} else {
			enforce_encryption = 0;
		}
	}

	return enforce_encryption != 0;
}

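/*
 * Encryption is available when the coredump_encryption boot-arg (consulted
 * only if kernel debugging is not restricted) does not disable it and the
 * AEA stage reports that it can be used.
 */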
static bool
kern_dump_is_encryption_available(void)
{
	// Default to feature enabled unless boot-arg says otherwise
	uint32_t coredump_encryption_flags = COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY;

	if (!kernel_debugging_restricted()) {
		PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags));
	}

	if ((coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY) == 0) {
		return false;
	}

	return aea_stage_is_available();
}

/*
 * Initialize (or de-initialize) the encryption stage. This is done in a way such that if initializing the
 * encryption stage with a new key fails, then the existing encryption stage is left untouched. Once
 * the new stage is initialized, the old stage is uninitialized.
 *
 * This function is called whenever we have a new public key (whether from someone calling our sysctl, or because
 * we read it out of a corefile), or when encryption becomes available.
 *
 * Parameters:
 *  - public_key:      The public key to use when initializing the encryption stage. Can be NULL to indicate that
 *                     the encryption stage should be de-initialized.
 *  - public_key_size: The size of the given public key.
 */
static kern_return_t
kdp_core_init_encryption_stage(void *public_key, size_t public_key_size)
{
	kern_return_t ret = KERN_SUCCESS;
	struct kdp_output_stage new_encryption_stage = {};
	struct kdp_output_stage old_encryption_stage = {};

	lck_mtx_assert(kdp_core_encryption_stage_lock, LCK_MTX_ASSERT_OWNED);

	bzero(&new_encryption_stage, sizeof(new_encryption_stage));

	if (public_key && kern_dump_is_encryption_available()) {
		ret = aea_stage_initialize(&new_encryption_stage, public_key, public_key_size);
		if (KERN_SUCCESS != ret) {
			printf("(kdp_core_init_encryption_stage) Failed to initialize the encryption stage. Error 0x%x\n", ret);
			return ret;
		}
	}

	bcopy(&aea_output_stage, &old_encryption_stage, sizeof(aea_output_stage));

	bcopy(&new_encryption_stage, &aea_output_stage, sizeof(new_encryption_stage));

	if (old_encryption_stage.kos_initialized && old_encryption_stage.kos_funcs.kosf_free) {
		old_encryption_stage.kos_funcs.kosf_free(&old_encryption_stage);
	}

	return KERN_SUCCESS;
}

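/*
 * Called when a new public key is provided (e.g. through the coredump
 * encryption sysctl). The encryption stage is re-initialized with the new
 * key first; only if that succeeds do we adopt the key, update the in-memory
 * corefile header, release the previous key, and patch the header and the
 * "next" key slot in the on-disk corefile through the access callback.
 */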
kern_return_t
kdp_core_handle_new_encryption_key(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context)
{
	kern_return_t ret = KERN_SUCCESS;
	struct kdp_core_encryption_key_descriptor *key_descriptor = (struct kdp_core_encryption_key_descriptor *) recipient_context;
	void *old_public_key = NULL;
	size_t old_public_key_size = 0;

	if (!key_descriptor) {
		return kIOReturnBadArgument;
	}

	lck_mtx_lock(kdp_core_encryption_stage_lock);
	kdp_core_is_initializing_encryption_stage = true;

	do {
		// Do the risky part first, and bail out cleanly if it fails
		ret = kdp_core_init_encryption_stage(key_descriptor->kcekd_key, key_descriptor->kcekd_size);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to re-initialize encryption stage. Error 0x%x\n", ret);
			break;
		}

		// The rest of this function should technically never fail

		old_public_key = kdp_core_public_key;
		old_public_key_size = kdp_core_header->pub_key_length;

		kdp_core_public_key = key_descriptor->kcekd_key;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
		kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_EXISTING_COREFILE_KEY_FORMAT_MASK;
		if (key_descriptor->kcekd_key) {
			kdp_core_header->flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(key_descriptor->kcekd_format);
			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
			kdp_core_header->pub_key_length = key_descriptor->kcekd_size;
		} else {
			kdp_core_header->pub_key_offset = 0;
			kdp_core_header->pub_key_length = 0;
		}

		if (old_public_key) {
			kmem_free(kernel_map, (vm_offset_t) old_public_key, old_public_key_size);
		}

		// If this stuff fails, we have bigger problems
		struct mach_core_fileheader_v2 existing_header;
		bool used_existing_header = false;
		ret = access_data(access_context, FALSE, 0, sizeof(existing_header), &existing_header);
		if (ret != KERN_SUCCESS) {
			printf("kdp_core_handle_new_encryption_key failed to read the existing corefile header. Error 0x%x\n", ret);
			break;
		}

		if (existing_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
		    && existing_header.version == 2
		    && (existing_header.pub_key_length == 0
		    || kdp_core_header->pub_key_length == 0
		    || existing_header.pub_key_length == kdp_core_header->pub_key_length)) {
			used_existing_header = true;
			existing_header.flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

			if (kdp_core_public_key) {
				existing_header.flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

				if (existing_header.pub_key_offset == 0) {
					existing_header.pub_key_offset = kdp_core_header->pub_key_offset;
					existing_header.pub_key_length = kdp_core_header->pub_key_length;
				}
			}

			ret = access_data(access_context, TRUE, 0, sizeof(existing_header), &existing_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to update the existing corefile header. Error 0x%x\n", ret);
				break;
			}
		} else {
			ret = access_data(access_context, TRUE, 0, sizeof(struct mach_core_fileheader_v2), kdp_core_header);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the corefile header. Error 0x%x\n", ret);
				break;
			}
		}

		if (kdp_core_header->pub_key_length) {
			uint64_t offset = used_existing_header ? existing_header.pub_key_offset : kdp_core_header->pub_key_offset;
			ret = access_data(access_context, TRUE, offset + PUBLIC_KEY_RESERVED_LENGTH, kdp_core_header->pub_key_length, kdp_core_public_key);
			if (ret != KERN_SUCCESS) {
				printf("kdp_core_handle_new_encryption_key failed to write the next public key. Error 0x%x\n", ret);
				break;
			}

			if (!used_existing_header) {
				// Everything that happens here is optional. It's not the end of the world if this stuff fails, so we don't return
				// any errors.
				// Since we're writing out a completely new header, we make sure to zero-out the region that's reserved for the public key.
				// This allows consumers of the corefile to know for sure that this corefile is not encrypted (yet). Once we actually
				// write out a corefile, we'll overwrite this region with the key that we ended up using at the time.
				// If we fail to zero-out this region, consumers would read garbage data and properly fail to interpret it as a public key,
				// which is why it is OK for us to fail here (it's hard to interpret garbage data as a valid key, and even then, they wouldn't
				// find a matching private key anyway).
1368 				void *empty_key = NULL;
1369 				kern_return_t temp_ret = KERN_SUCCESS;
1370 
1371 				temp_ret = kmem_alloc_flags(kernel_map, (vm_offset_t *) &empty_key, PUBLIC_KEY_RESERVED_LENGTH, VM_KERN_MEMORY_DIAG, KMA_ZERO);
1372 				if (temp_ret != KERN_SUCCESS) {
1373 					printf("kdp_core_handle_new_encryption_key failed to allocate an empty key. Error 0x%x\n", temp_ret);
1374 					break;
1375 				}
1376 
1377 				temp_ret = access_data(access_context, TRUE, offset, PUBLIC_KEY_RESERVED_LENGTH, empty_key);
1378 				kmem_free(kernel_map, (vm_offset_t) empty_key, PUBLIC_KEY_RESERVED_LENGTH);
1379 
1380 				if (temp_ret != KERN_SUCCESS) {
1381 					printf("kdp_core_handle_new_encryption_key failed to zero-out the public key region. Error 0x%x\n", temp_ret);
1382 					break;
1383 				}
1384 			}
1385 		}
1386 	} while (0);
1387 
1388 	kdp_core_is_initializing_encryption_stage = false;
1389 	lck_mtx_unlock(kdp_core_encryption_stage_lock);
1390 
1391 	return ret;
1392 }
1393 
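/*
 * Handle the point at which encryption support becomes available: (re)initialize the encryption output
 * stage with the currently registered public key, while holding the encryption-stage lock and flagging
 * that the stage is being initialized.
 */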
1394 kern_return_t
1395 kdp_core_handle_encryption_available(void)
1396 {
1397 	kern_return_t ret;
1398 
1399 	lck_mtx_lock(kdp_core_encryption_stage_lock);
1400 	kdp_core_is_initializing_encryption_stage = true;
1401 
1402 	ret = kdp_core_init_encryption_stage(kdp_core_public_key, kdp_core_header->pub_key_length);
1403 
1404 	kdp_core_is_initializing_encryption_stage = false;
1405 	lck_mtx_unlock(kdp_core_encryption_stage_lock);
1406 
1407 	return ret;
1408 }
1409 
1410 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1411 
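/*
 * Handle the kernel corefile becoming available for polled I/O: initialize the disk output stage and,
 * when coredump encryption is configured and no public key has been provided yet, try to adopt the
 * "next" public key already stashed in the on-disk corefile header.
 */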
1412 kern_return_t
1413 kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, __unused void *recipient_context)
1414 {
1415 	kern_return_t ret = KERN_SUCCESS;
1416 
1417 	lck_mtx_lock(kdp_core_disk_stage_lock);
1418 	kdp_core_is_initializing_disk_stage = true;
1419 
1420 	ret = disk_stage_initialize(&disk_output_stage);
1421 
1422 	kdp_core_is_initializing_disk_stage = false;
1423 	lck_mtx_unlock(kdp_core_disk_stage_lock);
1424 
1425 	if (KERN_SUCCESS != ret) {
1426 		return ret;
1427 	}
1428 
1429 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1430 	// If someone has already provided a new public key,
1431 	// there's no sense in reading the old one from the corefile.
1432 	if (kdp_core_public_key != NULL) {
1433 		return KERN_SUCCESS;
1434 	}
1435 
1436 	// The kernel corefile is now available. Let's try to retrieve the public key from its
1437 	// header (if available and supported).
1438 
1439 	// First let's read the corefile header itself
1440 	struct mach_core_fileheader_v2 temp_header = {};
1441 	ret = access_data(access_context, FALSE, 0, sizeof(temp_header), &temp_header);
1442 	if (KERN_SUCCESS != ret) {
1443 		printf("kdp_core_polled_io_polled_file_available failed to read corefile header. Error 0x%x\n", ret);
1444 		return ret;
1445 	}
1446 
1447 	// Check that the corefile header is initialized, that it's initialized to values that we support
1448 	// (for backwards and forwards compatibility), and that the header indicates that the corefile
1449 	// has a public key stashed inside of it.
1450 	if (temp_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
1451 	    && temp_header.version == 2
1452 	    && temp_header.pub_key_offset != 0
1453 	    && temp_header.pub_key_length != 0
1454 	    /* Future-proofing: make sure it's the key format that we support */
1455 	    && (temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK) == MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256
1456 	    /* Some extra sanity checks. These are not strictly necessary */
1457 	    && temp_header.pub_key_length <= 4096
1458 	    && temp_header.pub_key_offset < 65535) {
1459 		// The corefile header is properly initialized, is supported, and contains a public key.
1460 		// Let's adopt that public key for our encryption needs
1461 		void *public_key = NULL;
1462 
1463 		ret = kmem_alloc(kernel_map, (vm_offset_t *) &public_key, temp_header.pub_key_length, VM_KERN_MEMORY_DIAG);
1464 		assert(KERN_SUCCESS == ret);
1465 
1466 		// Read the public key from the corefile. Note that the key we're trying to adopt is the "next" key, which is
1467 		// PUBLIC_KEY_RESERVED_LENGTH bytes after the public key.
1468 		ret = access_data(access_context, FALSE, temp_header.pub_key_offset + PUBLIC_KEY_RESERVED_LENGTH, temp_header.pub_key_length, public_key);
1469 		if (KERN_SUCCESS != ret) {
1470 			printf("kdp_core_polled_io_polled_file_available failed to read the public key. Error 0x%x\n", ret);
1471 			kmem_free(kernel_map, (vm_offset_t) public_key, temp_header.pub_key_length);
1472 			return ret;
1473 		}
1474 
1475 		lck_mtx_lock(kdp_core_encryption_stage_lock);
1476 		kdp_core_is_initializing_encryption_stage = true;
1477 
1478 		ret = kdp_core_init_encryption_stage(public_key, temp_header.pub_key_length);
1479 		if (KERN_SUCCESS == ret) {
1480 			kdp_core_header->flags |= temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
1481 			kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(temp_header.flags);
1482 			kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1483 			kdp_core_header->pub_key_length = temp_header.pub_key_length;
1484 			kdp_core_public_key = public_key;
1485 		}
1486 
1487 		kdp_core_is_initializing_encryption_stage = false;
1488 		lck_mtx_unlock(kdp_core_encryption_stage_lock);
1489 	}
1490 #else
1491 #pragma unused(access_data, access_context)
1492 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1493 
1494 	return ret;
1495 }
1496 
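/*
 * Handle the kernel corefile becoming unavailable: tear down the disk output stage (if it was
 * initialized) under the disk-stage lock.
 */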
1497 kern_return_t
1498 kdp_core_polled_io_polled_file_unavailable(void)
1499 {
1500 	lck_mtx_lock(kdp_core_disk_stage_lock);
1501 	kdp_core_is_initializing_disk_stage = true;
1502 
1503 	if (disk_output_stage.kos_initialized && disk_output_stage.kos_funcs.kosf_free) {
1504 		disk_output_stage.kos_funcs.kosf_free(&disk_output_stage);
1505 	}
1506 
1507 	kdp_core_is_initializing_disk_stage = false;
1508 	lck_mtx_unlock(kdp_core_disk_stage_lock);
1509 
1510 	return KERN_SUCCESS;
1511 }
1512 
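/*
 * One-time coredump initialization: set up the output stages, allocate and initialize the V2 corefile
 * header, create the stage-initialization locks, and register the callbacks that drive the kernel's own
 * coredump (summary, segment descriptions, thread state, software version, and segment data).
 */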
1513 void
1514 kdp_core_init(void)
1515 {
1516 	kern_return_t kr;
1517 	kern_coredump_callback_config core_config = { };
1518 
1519 	/* Initialize output stages */
1520 	kr = kdp_core_init_output_stages();
1521 	assert(KERN_SUCCESS == kr);
1522 
1523 	kr = kmem_alloc(kernel_map, (vm_offset_t*) &kdp_core_header, kdp_core_header_size, VM_KERN_MEMORY_DIAG);
1524 	assert(KERN_SUCCESS == kr);
1525 
1526 	bzero(kdp_core_header, kdp_core_header_size);
1527 	kdp_core_header->signature = MACH_CORE_FILEHEADER_V2_SIGNATURE;
1528 	kdp_core_header->version = 2;
1529 
1530 	kdp_core_initialization_lock_group = lck_grp_alloc_init("KDPCoreStageInit", LCK_GRP_ATTR_NULL);
1531 	kdp_core_disk_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);
1532 
1533 #ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
1534 	kdp_core_encryption_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);
1535 
1536 	(void) kern_dump_should_enforce_encryption();
1537 #endif // CONFIG_KDP_COREDUMP_ENCRYPTION
1538 
1539 	core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
1540 	core_config.kcc_coredump_get_summary = kern_dump_save_summary;
1541 	core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
1542 	core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
1543 	core_config.kcc_coredump_save_sw_vers_detail = kern_dump_save_sw_vers_detail;
1544 	core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;
1545 
1546 	kr = kern_register_xnu_coredump_helper(&core_config);
1547 	assert(KERN_SUCCESS == kr);
1548 }
1549 
1550 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1551