/*
 * Copyright (c) 2015-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * The main orchestrator for kernel (and co-processor) coredumps. Here's a
 * simplified view of the flow:
 *
 * At kernel initialization time (kdp_core_init):
 * ----------------------------------------------
 *
 * - kdp_core_init() takes care of allocating all necessary data structures and initializes the
 *   coredump output stages
 *
 * At coredump time (do_kern_dump):
 * --------------------------------
 *
 * - Depending on the coredump variant, we chain the necessary output stages together in
 *   chain_output_stages() (see the illustrative sketch below)
 * - [Disk only] We initialize the corefile header
 * - [Disk only] We stream the stackshot out through the output stages and update the corefile header
 * - We perform the kernel coredump, streaming it out through the output stages
 * - [Disk only] We update the corefile header
 * - [Disk only] We perform the co-processor coredumps (driven by kern_do_coredump), streaming each out
 *   through the output stages and updating the corefile header
 * - [Disk only] We save the coredump log to the corefile
 */
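
/*
 * Illustrative sketch only (not part of the build): a disk-variant chain built
 * by chain_output_stages() roughly looks like the following, with data entering
 * at the head of the STAILQ and the disk stage at the tail doing the final
 * write. This assumes the stages have already been initialized by
 * kdp_core_init(), and omits the optional encryption and arm64-only stages:
 *
 *     struct kdp_core_out_state outstate = { };
 *     STAILQ_INIT(&outstate.kcos_out_stage);
 *     STAILQ_INSERT_TAIL(&outstate.kcos_out_stage, &zlib_output_stage, kos_next);
 *     STAILQ_INSERT_TAIL(&outstate.kcos_out_stage, &progress_notify_output_stage, kos_next);
 *     STAILQ_INSERT_TAIL(&outstate.kcos_out_stage, &disk_output_stage, kos_next);
 *
 * Each stage's kosf_outproc consumes data and hands its transformed output to
 * the next stage in the chain.
 */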

#ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING

#include <mach/mach_types.h>
#include <mach/vm_attributes.h>
#include <mach/vm_param.h>
#include <mach/vm_map.h>
#include <vm/vm_protos.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <machine/cpu_capabilities.h>
#include <libsa/types.h>
#include <libkern/kernel_mach_header.h>
#include <kern/locks.h>
#include <kdp/kdp_internal.h>
#include <kdp/kdp_core.h>
#include <kdp/output_stages/output_stages.h>
#include <kdp/processor_core.h>
#include <IOKit/IOTypes.h>
#include <IOKit/IOBSD.h>
#include <sys/errno.h>
#include <sys/msgbuf.h>
#include <san/kasan.h>
#include <kern/debug.h>
#include <pexpert/pexpert.h>

#if defined(__x86_64__)
#include <i386/pmap_internal.h>
#include <kdp/ml/i386/kdp_x86_common.h>
#include <kern/debug.h>
#endif /* defined(__x86_64__) */

kern_return_t kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context);
kern_return_t kdp_core_polled_io_polled_file_unavailable(void);

typedef int (*pmap_traverse_callback)(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

extern int pmap_traverse_present_mappings(pmap_t pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context);

static int kern_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context);
static int kern_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context);
static int kern_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context);
static int kern_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context);
static int kern_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context);

static int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);
static int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context);

static struct kdp_output_stage disk_output_stage = {};
static struct kdp_output_stage zlib_output_stage = {};
static struct kdp_output_stage buffer_output_stage = {};
static struct kdp_output_stage net_output_stage = {};
static struct kdp_output_stage progress_notify_output_stage = {};
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static struct kdp_output_stage aea_output_stage = {};
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
#if defined(__arm__) || defined(__arm64__)
static struct kdp_output_stage shmem_output_stage = {};
#endif /* defined(__arm__) || defined(__arm64__) */
#if defined(__arm64__)
static struct kdp_output_stage memory_backing_aware_buffer_output_stage = {};
#endif /* defined(__arm64__) */

extern uint32_t kdp_crashdump_pkt_size;

static boolean_t kern_dump_successful = FALSE;

static const size_t kdp_core_header_size = sizeof(struct mach_core_fileheader_v2) + (KERN_COREDUMP_MAX_CORES * sizeof(struct mach_core_details_v2));
static struct mach_core_fileheader_v2 *kdp_core_header = NULL;

static lck_grp_t *kdp_core_initialization_lock_group = NULL;
static lck_mtx_t *kdp_core_disk_stage_lock = NULL;
static bool kdp_core_is_initializing_disk_stage = false;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
static const size_t PUBLIC_KEY_RESERVED_LENGTH = roundup(4096, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
static void *kdp_core_public_key = NULL;
static lck_mtx_t *kdp_core_encryption_stage_lock = NULL;
static bool kdp_core_is_initializing_encryption_stage = false;

static bool kern_dump_should_enforce_encryption(void);
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

/*
 * These variables will be modified by the BSD layer if the root device is
 * a RAMDisk.
 */
uint64_t kdp_core_ramdisk_addr = 0;
uint64_t kdp_core_ramdisk_size = 0;

#define COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY (1 << 0)
#define COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT  (1 << 1)
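
/*
 * The two bits above are read from the "coredump_encryption" boot-arg (see
 * kern_dump_is_encryption_available() and kern_dump_should_enforce_encryption()
 * below), and are honored only when kernel debugging is not restricted. As an
 * illustrative example of the resulting bit combinations:
 *
 *     coredump_encryption=3   encryption allowed and enforced (both bits set)
 *     coredump_encryption=1   encryption allowed, but not enforced
 *     coredump_encryption=0   encryption disabled via the availability override
 */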

boolean_t
kdp_has_polled_corefile(void)
{
    return NULL != gIOPolledCoreFileVars;
}

kern_return_t
kdp_polled_corefile_error(void)
{
    return gIOPolledCoreFileOpenRet;
}

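/*
 * Entry point for data produced by the coredump machinery: pushes `length`
 * bytes at `data` into the first output stage of the chain, latches the first
 * error into kcos_error (so subsequent writes are skipped), and logs progress
 * in roughly 10% increments. A NULL data / zero length pair acts as a flush
 * and logs "100..".
 */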
kern_return_t
kdp_core_output(void *kdp_core_out_state, uint64_t length, void * data)
{
    kern_return_t err = KERN_SUCCESS;
    uint64_t percent;
    struct kdp_core_out_state *vars = (struct kdp_core_out_state *)kdp_core_out_state;
    struct kdp_output_stage *first_stage = STAILQ_FIRST(&vars->kcos_out_stage);

    if (vars->kcos_error == KERN_SUCCESS) {
        if ((err = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, length, data)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "(kdp_core_output) outproc(KDP_DATA, NULL, 0x%llx, %p) returned 0x%x\n",
                length, data, err);
            vars->kcos_error = err;
        }
        if (!data && !length) {
            kern_coredump_log(NULL, "100..");
        } else {
            vars->kcos_bytes_written += length;
            percent = (vars->kcos_bytes_written * 100) / vars->kcos_totalbytes;
            if ((percent - vars->kcos_lastpercent) >= 10) {
                vars->kcos_lastpercent = percent;
                kern_coredump_log(NULL, "%lld..\n", percent);
            }
        }
    }
    return err;
}

#if defined(__arm__) || defined(__arm64__)
extern pmap_paddr_t avail_start, avail_end;
extern struct vm_object pmap_object_store;
#endif
extern vm_offset_t c_buffers;
extern vm_size_t c_buffers_size;

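/*
 * The output stages' working buffers live in the kernel's address space, but
 * they are excluded from the dump (see the "VA ranges to exclude" logic in
 * kernel_pmap_present_mapping() below), presumably because their contents
 * churn as the dump itself is produced. kernel_vaddr_in_coredump_stage()
 * reports whether a virtual address falls inside a stage's working memory
 * and, if so, how many bytes remain until its end (*vincr), so the traversal
 * can skip over it.
 */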
static bool
kernel_vaddr_in_coredump_stage(const struct kdp_output_stage *stage, uint64_t vaddr, uint64_t *vincr)
{
    uint64_t start_addr = (uint64_t)stage->kos_data;
    uint64_t end_addr = start_addr + stage->kos_data_size;

    if (!stage->kos_data) {
        return false;
    }

    if (vaddr >= start_addr && vaddr < end_addr) {
        *vincr = stage->kos_data_size - (vaddr - start_addr);
        return true;
    }

    return false;
}

static bool
kernel_vaddr_in_coredump_stages(uint64_t vaddr, uint64_t *vincr)
{
    if (kernel_vaddr_in_coredump_stage(&disk_output_stage, vaddr, vincr)) {
        return true;
    }

    if (kernel_vaddr_in_coredump_stage(&zlib_output_stage, vaddr, vincr)) {
        return true;
    }

    if (kernel_vaddr_in_coredump_stage(&buffer_output_stage, vaddr, vincr)) {
        return true;
    }

    if (kernel_vaddr_in_coredump_stage(&net_output_stage, vaddr, vincr)) {
        return true;
    }

    if (kernel_vaddr_in_coredump_stage(&progress_notify_output_stage, vaddr, vincr)) {
        return true;
    }

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
    if (kernel_vaddr_in_coredump_stage(&aea_output_stage, vaddr, vincr)) {
        return true;
    }
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm__) || defined(__arm64__)
    if (kernel_vaddr_in_coredump_stage(&shmem_output_stage, vaddr, vincr)) {
        return true;
    }
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(__arm64__)
    if (kernel_vaddr_in_coredump_stage(&memory_backing_aware_buffer_output_stage, vaddr, vincr)) {
        return true;
    }
#endif /* defined(__arm64__) */

    return false;
}

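/*
 * Returns the physical page number backing `vaddr`, or 0 if the page should
 * not be dumped. On return, *pvincr is the page-rounded number of bytes to
 * advance the traversal by: one page for ordinary mappings, or the size of an
 * excluded range (compressor buffers, coredump stage working memory, the RAM
 * disk, or the physical memory map on arm). If `pvphysaddr` is non-NULL, the
 * physmap virtual address of the page is also returned, and only pages
 * enclosed by the physmap are considered valid.
 */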
ppnum_t
kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr)
{
    ppnum_t ppn = 0;
    uint64_t vincr = PAGE_SIZE_64;

    assert(!(vaddr & PAGE_MASK_64));

    /* VA ranges to exclude */
    if (vaddr == c_buffers) {
        /* compressor data */
        ppn = 0;
        vincr = c_buffers_size;
    } else if (kernel_vaddr_in_coredump_stages(vaddr, &vincr)) {
        /* coredump output stage working memory */
        ppn = 0;
    } else if ((kdp_core_ramdisk_addr != 0) && (vaddr == kdp_core_ramdisk_addr)) {
        ppn = 0;
        vincr = kdp_core_ramdisk_size;
    } else
#if defined(__arm__) || defined(__arm64__)
    if (vaddr == phystokv(avail_start)) {
        /* physical memory map */
        ppn = 0;
        vincr = (avail_end - avail_start);
    } else
#endif /* defined(__arm__) || defined(__arm64__) */
    {
        ppn = (pvphysaddr != NULL ?
            pmap_find_phys(kernel_pmap, vaddr) :
            pmap_find_phys_nofault(kernel_pmap, vaddr));
    }

    *pvincr = round_page_64(vincr);

    if (ppn && pvphysaddr) {
        uint64_t phys = ptoa_64(ppn);
        if (physmap_enclosed(phys)) {
            *pvphysaddr = phystokv(phys);
        } else {
            ppn = 0;
        }
    }

    return ppn;
}

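/*
 * Walks the kernel's address space from `start` to `end`, coalescing runs of
 * consecutively present pages and invoking `callback` once per run with the
 * run's [start, end) virtual range. On arm, pagetable pages reached through
 * the physmap are walked via pmap_object_store; on x86_64, unmapped
 * 2MB-aligned holes are skipped a page directory at a time.
 */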
int
pmap_traverse_present_mappings(pmap_t __unused pmap,
    vm_map_offset_t start,
    vm_map_offset_t end,
    pmap_traverse_callback callback,
    void *context)
{
    IOReturn ret;
    vm_map_offset_t vcurstart, vcur;
    uint64_t vincr = 0;
    vm_map_offset_t debug_start = trunc_page((vm_map_offset_t) debug_buf_base);
    vm_map_offset_t debug_end = round_page((vm_map_offset_t) (debug_buf_base + debug_buf_size));
#if defined(XNU_TARGET_OS_BRIDGE)
    vm_map_offset_t macos_panic_start = trunc_page((vm_map_offset_t) macos_panic_base);
    vm_map_offset_t macos_panic_end = round_page((vm_map_offset_t) (macos_panic_base + macos_panic_size));
#endif

    boolean_t lastvavalid;
#if defined(__arm__) || defined(__arm64__)
    vm_page_t m = VM_PAGE_NULL;
#endif

#if defined(__x86_64__)
    assert(!is_ept_pmap(pmap));
#endif

    /* Assumes pmap is locked, or being called from the kernel debugger */

    if (start > end) {
        return KERN_INVALID_ARGUMENT;
    }

    ret = KERN_SUCCESS;
    lastvavalid = FALSE;
    for (vcur = vcurstart = start; (ret == KERN_SUCCESS) && (vcur < end);) {
        ppnum_t ppn = 0;

#if defined(__arm__) || defined(__arm64__)
        /* We're at the start of the physmap, so pull out the pagetable pages that
         * are accessed through that region. */
        if (vcur == phystokv(avail_start) && vm_object_lock_try_shared(&pmap_object_store)) {
            m = (vm_page_t)vm_page_queue_first(&pmap_object_store.memq);
        }

        if (m != VM_PAGE_NULL) {
            vm_map_offset_t vprev = vcur;
            ppn = (ppnum_t)atop(avail_end);
            while (!vm_page_queue_end(&pmap_object_store.memq, (vm_page_queue_entry_t)m)) {
                /* Ignore pages that come from the static region and have already been dumped. */
                if (VM_PAGE_GET_PHYS_PAGE(m) >= atop(avail_start)) {
                    ppn = VM_PAGE_GET_PHYS_PAGE(m);
                    break;
                }
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
            }
            vincr = PAGE_SIZE_64;
            if (ppn == atop(avail_end)) {
                vm_object_unlock(&pmap_object_store);
                m = VM_PAGE_NULL;
                // avail_end is not a valid physical address,
                // so phystokv(avail_end) may not produce the expected result.
                vcur = phystokv(avail_start) + (avail_end - avail_start);
            } else {
                m = (vm_page_t)vm_page_queue_next(&m->vmp_listq);
                vcur = phystokv(ptoa(ppn));
            }
            if (vcur != vprev) {
                ret = callback(vcurstart, vprev, context);
                lastvavalid = FALSE;
            }
        }
        if (m == VM_PAGE_NULL) {
            ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
        }
#else /* defined(__arm__) || defined(__arm64__) */
        ppn = kernel_pmap_present_mapping(vcur, &vincr, NULL);
#endif
        if (ppn != 0) {
            if (((vcur < debug_start) || (vcur >= debug_end))
                && !(pmap_valid_page(ppn) || bootloader_valid_page(ppn))
#if defined(XNU_TARGET_OS_BRIDGE)
                // include the macOS panic region if it's mapped
                && ((vcur < macos_panic_start) || (vcur >= macos_panic_end))
#endif
                ) {
                /* not something we want */
                ppn = 0;
            }
            /* include the phys carveout only if explicitly marked */
            if ((debug_is_in_phys_carveout(vcur) || debug_is_in_phys_carveout_metadata(vcur)) &&
                !debug_can_coredump_phys_carveout()) {
                ppn = 0;
            }
        }

        if (ppn != 0) {
            if (!lastvavalid) {
                /* Start of a new virtual region */
                vcurstart = vcur;
                lastvavalid = TRUE;
            }
        } else {
            if (lastvavalid) {
                /* end of a virtual region */
                ret = callback(vcurstart, vcur, context);
                lastvavalid = FALSE;
            }

#if defined(__x86_64__)
            /* Try to skip by 2MB if possible */
            if ((vcur & PDMASK) == 0) {
                pd_entry_t *pde;
                pde = pmap_pde(pmap, vcur);
                if (0 == pde || ((*pde & INTEL_PTE_VALID) == 0)) {
                    /* Make sure we wouldn't overflow */
                    if (vcur < (end - NBPD)) {
                        vincr = NBPD;
                    }
                }
            }
#endif /* defined(__x86_64__) */
        }
        vcur += vincr;
    }

    if ((ret == KERN_SUCCESS) && lastvavalid) {
        /* send previous run */
        ret = callback(vcurstart, vcur, context);
    }

#if KASAN
    if (ret == KERN_SUCCESS) {
        ret = kasan_traverse_mappings(callback, context);
    }
#endif

    return ret;
}

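/*
 * The traversal above is run in multiple passes per dump: a preflight pass
 * that only counts regions and dumpable bytes (kern_dump_preflight_context
 * below), and later passes that emit segment descriptions and segment data
 * through the kern_dump_send_* contexts.
 */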
struct kern_dump_preflight_context {
    uint32_t region_count;
    uint64_t dumpable_bytes;
};

int
kern_dump_pmap_traverse_preflight_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
    struct kern_dump_preflight_context *kdc = (struct kern_dump_preflight_context *)context;
    IOReturn ret = KERN_SUCCESS;

    kdc->region_count++;
    kdc->dumpable_bytes += (end - start);

    return ret;
}


struct kern_dump_send_seg_desc_context {
    core_save_segment_descriptions_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdesc_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
    struct kern_dump_send_seg_desc_context *kds_context = (struct kern_dump_send_seg_desc_context *)context;
    uint64_t seg_start = (uint64_t) start;
    uint64_t seg_end = (uint64_t) end;

    return kds_context->callback(seg_start, seg_end, kds_context->context);
}

struct kern_dump_send_segdata_context {
    core_save_segment_data_cb callback;
    void *context;
};

int
kern_dump_pmap_traverse_send_segdata_callback(vm_map_offset_t start,
    vm_map_offset_t end,
    void *context)
{
    struct kern_dump_send_segdata_context *kds_context = (struct kern_dump_send_segdata_context *)context;

    return kds_context->callback((void *)start, (uint64_t)(end - start), kds_context->context);
}

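/*
 * The five kern_dump_save_* functions below implement the xnu side of the
 * kern_coredump_callback_config interface registered in kdp_core_init(): the
 * core dumper invokes them to obtain the summary (region/thread counts and
 * sizes), the segment descriptions, the per-thread state, the software
 * version detail, and finally the raw segment data.
 */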
static int
kern_dump_save_summary(__unused void *refcon, core_save_summary_cb callback, void *context)
{
    struct kern_dump_preflight_context kdc_preflight = { };
    uint64_t thread_state_size = 0, thread_count = 0;
    vm_map_offset_t vstart = kdp_core_start_addr();
    kern_return_t ret;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        vstart,
        VM_MAX_KERNEL_ADDRESS,
        kern_dump_pmap_traverse_preflight_callback,
        &kdc_preflight);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_summary: pmap traversal failed: %d\n", ret);
        return ret;
    }

    kern_collectth_state_size(&thread_count, &thread_state_size);

    ret = callback(kdc_preflight.region_count, kdc_preflight.dumpable_bytes,
        thread_count, thread_state_size, 0, context);
    return ret;
}

static int
kern_dump_save_seg_descriptions(__unused void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
    vm_map_offset_t vstart = kdp_core_start_addr();
    kern_return_t ret;
    struct kern_dump_send_seg_desc_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        vstart,
        VM_MAX_KERNEL_ADDRESS,
        kern_dump_pmap_traverse_send_segdesc_callback,
        &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_desc: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

static int
kern_dump_save_thread_state(__unused void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
    kern_return_t ret;
    uint64_t thread_state_size = 0, thread_count = 0;

    kern_collectth_state_size(&thread_count, &thread_state_size);

    if (thread_state_size > 0) {
        void * iter = NULL;
        do {
            kern_collectth_state(current_thread(), buf, thread_state_size, &iter);

            ret = callback(buf, context);
            if (ret != KERN_SUCCESS) {
                return ret;
            }
        } while (iter);
    }

    return KERN_SUCCESS;
}


static int
kern_dump_save_sw_vers_detail(__unused void *refcon, core_save_sw_vers_detail_cb callback, void *context)
{
    return callback(vm_kernel_stext, kernel_uuid, 0, context);
}

static int
kern_dump_save_segment_data(__unused void *refcon, core_save_segment_data_cb callback, void *context)
{
    vm_map_offset_t vstart = kdp_core_start_addr();
    kern_return_t ret;
    struct kern_dump_send_segdata_context kds_context;

    kds_context.callback = callback;
    kds_context.context = context;

    ret = pmap_traverse_present_mappings(kernel_pmap,
        vstart,
        VM_MAX_KERNEL_ADDRESS, kern_dump_pmap_traverse_send_segdata_callback, &kds_context);
    if (ret != KERN_SUCCESS) {
        kern_coredump_log(context, "save_seg_data: pmap traversal failed: %d\n", ret);
        return ret;
    }

    return KERN_SUCCESS;
}

kern_return_t
kdp_reset_output_vars(void *kdp_core_out_state, uint64_t totalbytes, bool encrypt_core, bool *out_should_skip_coredump)
{
    struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
    struct kdp_output_stage *current_stage = NULL;

    /* Re-initialize kdp_outstate */
    outstate->kcos_totalbytes = totalbytes;
    outstate->kcos_bytes_written = 0;
    outstate->kcos_lastpercent = 0;
    outstate->kcos_error = KERN_SUCCESS;

    /* Reset the output stages */
    STAILQ_FOREACH(current_stage, &outstate->kcos_out_stage, kos_next) {
        current_stage->kos_funcs.kosf_reset(current_stage);
    }

    *out_should_skip_coredump = false;
    if (encrypt_core) {
        if (outstate->kcos_enforce_encryption && !outstate->kcos_encryption_stage) {
            *out_should_skip_coredump = true;
#if defined(__arm__) || defined(__arm64__)
            panic_info->eph_panic_flags |= EMBEDDED_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#else
            panic_info->mph_panic_flags |= MACOS_PANIC_HEADER_FLAG_ENCRYPTED_COREDUMP_SKIPPED;
#endif
            kern_coredump_log(NULL, "(kdp_reset_output_vars) Encryption requested, is unavailable, and enforcement is active. Skipping current core.\n");
        }
    } else if (outstate->kcos_encryption_stage) {
        outstate->kcos_encryption_stage->kos_bypass = true;
    }

    return KERN_SUCCESS;
}

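/*
 * Rewrites the corefile header in place: seeks the first output stage back to
 * offset 0, writes the v2 file header plus its per-core details array
 * (kdp_core_header_size bytes), and flushes. Called whenever the header
 * changes, e.g. after each core is recorded by kern_dump_record_file().
 */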
static kern_return_t
kern_dump_update_header(struct kdp_core_out_state *outstate)
{
    struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
    uint64_t foffset;
    kern_return_t ret;

    /* Write the file header -- first seek to the beginning of the file */
    foffset = 0;
    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
            sizeof(foffset), &foffset, foffset, ret);
        return ret;
    }

    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header_size, kdp_core_header)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
            kdp_core_header_size, kdp_core_header, ret);
        return ret;
    }

    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc data flush returned 0x%x\n", ret);
        return ret;
    }

#if defined(__arm__) || defined(__arm64__)
    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_update_header) outproc explicit flush returned 0x%x\n", ret);
        return ret;
    }
#endif /* defined(__arm__) || defined(__arm64__) */

    return ret;
}

kern_return_t
kern_dump_record_file(void *kdp_core_out_state, const char *filename, uint64_t file_offset, uint64_t *out_file_length)
{
    kern_return_t ret = KERN_SUCCESS;
    uint64_t bytes_written = 0;
    struct mach_core_details_v2 *core_details = NULL;
    struct kdp_output_stage *last_stage;
    struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;

    assert(kdp_core_header->num_files < KERN_COREDUMP_MAX_CORES);
    assert(out_file_length != NULL);
    *out_file_length = 0;

    last_stage = STAILQ_LAST(&outstate->kcos_out_stage, kdp_output_stage, kos_next);
    bytes_written = last_stage->kos_bytes_written;

    core_details = &(kdp_core_header->files[kdp_core_header->num_files]);
    core_details->flags = MACH_CORE_DETAILS_V2_FLAG_COMPRESSED_ZLIB;
    if (outstate->kcos_encryption_stage && outstate->kcos_encryption_stage->kos_bypass == false) {
        core_details->flags |= MACH_CORE_DETAILS_V2_FLAG_ENCRYPTED_AEA;
    }
    core_details->offset = file_offset;
    core_details->length = bytes_written;
    strncpy((char *)&core_details->core_name, filename,
        MACH_CORE_FILEHEADER_NAMELEN);
    core_details->core_name[MACH_CORE_FILEHEADER_NAMELEN - 1] = '\0';

    kdp_core_header->num_files++;

    ret = kern_dump_update_header(outstate);
    if (ret == KERN_SUCCESS) {
        *out_file_length = bytes_written;
    }

    return ret;
}

kern_return_t
kern_dump_seek_to_next_file(void *kdp_core_out_state, uint64_t next_file_offset)
{
    struct kdp_core_out_state *outstate = (struct kdp_core_out_state *)kdp_core_out_state;
    struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
    kern_return_t ret;

    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(next_file_offset), &next_file_offset)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_seek_to_next_file) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
            sizeof(next_file_offset), &next_file_offset, next_file_offset, ret);
    }

    return ret;
}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION

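/*
 * On-disk layout of the key area, as implied by the writes below: the region
 * at pub_key_offset holds the public key that the current corefile was (or
 * will be) encrypted with, zero-padded to PUBLIC_KEY_RESERVED_LENGTH, followed
 * by a second copy padded the same way, the "next" key, which is the key to
 * use for the next dump. kdp_core_polled_io_polled_file_available() reads that
 * "next" copy back at boot to adopt a previously installed key.
 */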
static kern_return_t
kern_dump_write_public_key(struct kdp_core_out_state *outstate)
{
    struct kdp_output_stage *first_stage = STAILQ_FIRST(&outstate->kcos_out_stage);
    uint64_t foffset;
    uint64_t remainder = PUBLIC_KEY_RESERVED_LENGTH - kdp_core_header->pub_key_length;
    kern_return_t ret;

    if (kdp_core_header->pub_key_offset == 0 || kdp_core_header->pub_key_length == 0) {
        // Nothing to do
        return KERN_SUCCESS;
    }

    /* Write the public key -- first seek to the appropriate offset */
    foffset = kdp_core_header->pub_key_offset;
    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
            sizeof(foffset), &foffset, foffset, ret);
        return ret;
    }

    // Write the public key
    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
            kdp_core_header->pub_key_length, kdp_core_public_key, ret);
        return ret;
    }

    // Fill out the remainder of the block with zeroes
    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
            remainder, ret);
        return ret;
    }

    // Do it once more to write the "next" public key
    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, kdp_core_header->pub_key_length, kdp_core_public_key)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %u, %p) returned 0x%x\n",
            kdp_core_header->pub_key_length, kdp_core_public_key, ret);
        return ret;
    }

    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, remainder, NULL)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc(KDP_DATA, NULL, %llu, NULL) returned 0x%x\n",
            remainder, ret);
        return ret;
    }

    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_DATA, NULL, 0, NULL)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc data flush returned 0x%x\n", ret);
        return ret;
    }

#if defined(__arm__) || defined(__arm64__)
    if ((ret = (first_stage->kos_funcs.kosf_outproc)(first_stage, KDP_FLUSH, NULL, 0, NULL)) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(kern_dump_write_public_key) outproc explicit flush returned 0x%x\n", ret);
        return ret;
    }
#endif /* defined(__arm__) || defined(__arm64__) */

    return ret;
}

#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

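/*
 * Builds the output-stage chain for the requested dump variant. Stage order
 * matters: data enters at the head of the list and is written out by the
 * tail. For disk dumps the chain is (optional) zlib -> progress-notify ->
 * (optional) AEA encryption -> disk, with an arm64-only memory-backing-aware
 * buffer stage at the very front; network dumps end in buffer -> net, and arm
 * hardware shared-memory dumps end in the shmem stage.
 */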
static kern_return_t
chain_output_stages(enum kern_dump_type kd_variant, struct kdp_core_out_state *outstate)
{
    struct kdp_output_stage *current = NULL;

    switch (kd_variant) {
    case KERN_DUMP_STACKSHOT_DISK:
        OS_FALLTHROUGH;
    case KERN_DUMP_DISK:
#if defined(__arm64__)
        STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &memory_backing_aware_buffer_output_stage, kos_next);
#endif
        if (!kdp_corezip_disabled) {
            STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
        }
        STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
        if (kdp_core_is_initializing_encryption_stage) {
            kern_coredump_log(NULL, "We were in the middle of initializing encryption. Marking it as unavailable\n");
        } else if (aea_output_stage.kos_initialized) {
            STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &aea_output_stage, kos_next);
            outstate->kcos_encryption_stage = &aea_output_stage;
        }
        outstate->kcos_enforce_encryption = kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION
        if (kdp_core_is_initializing_disk_stage) {
            kern_coredump_log(NULL, "We were in the middle of initializing the disk stage. Cannot write a coredump to disk\n");
            return KERN_FAILURE;
        } else if (disk_output_stage.kos_initialized == false) {
            kern_coredump_log(NULL, "Corefile is not yet initialized. Cannot write a coredump to disk\n");
            return KERN_FAILURE;
        }
        STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &disk_output_stage, kos_next);
        break;
    case KERN_DUMP_NET:
        if (!kdp_corezip_disabled) {
            STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
        }
        STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &progress_notify_output_stage, kos_next);
        STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &buffer_output_stage, kos_next);
        STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &net_output_stage, kos_next);
        break;
#if defined(__arm__) || defined(__arm64__)
    case KERN_DUMP_HW_SHMEM_DBG:
        if (!kdp_corezip_disabled) {
            STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &zlib_output_stage, kos_next);
        }
        STAILQ_INSERT_TAIL(&outstate->kcos_out_stage, &shmem_output_stage, kos_next);
        break;
#endif /* defined(__arm__) || defined(__arm64__) */
    }

    STAILQ_FOREACH(current, &outstate->kcos_out_stage, kos_next) {
        current->kos_outstate = outstate;
    }

    return KERN_SUCCESS;
}

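/*
 * Disk corefile layout, as computed below: the file starts with the v2 header
 * (rounded up to KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN), then the reserved
 * public key area (two PUBLIC_KEY_RESERVED_LENGTH blocks, when encryption is
 * configured), then reserved_debug_logsize bytes for the panic/coredump logs;
 * the first core's data begins at the next aligned offset after that. For
 * example, kdp_core_header->log_offset is simply the sum of the aligned
 * header and key-area sizes.
 */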
static int
do_kern_dump(enum kern_dump_type kd_variant)
{
    struct kdp_core_out_state outstate = { };
    struct kdp_output_stage *first_stage = NULL;
    char *coredump_log_start = NULL, *buf = NULL;
    size_t reserved_debug_logsize = 0, prior_debug_logsize = 0;
    uint64_t foffset = 0;
    kern_return_t ret = KERN_SUCCESS;
    boolean_t output_opened = FALSE, dump_succeeded = TRUE;

    /* Initialize output context */

    bzero(&outstate, sizeof(outstate));
    STAILQ_INIT(&outstate.kcos_out_stage);
    ret = chain_output_stages(kd_variant, &outstate);
    if (KERN_SUCCESS != ret) {
        dump_succeeded = FALSE;
        goto exit;
    }
    first_stage = STAILQ_FIRST(&outstate.kcos_out_stage);

    /*
     * Record the current end of the debug buffer so we can dump the coredump
     * log and panic log to disk
     */
    coredump_log_start = debug_buf_ptr;
#if defined(__arm__) || defined(__arm64__)
    assert(panic_info->eph_other_log_offset != 0);
    assert(panic_info->eph_panic_log_len != 0);
    /* Include any data from before the panic log as well */
    prior_debug_logsize = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
        panic_info->eph_panic_log_len + panic_info->eph_other_log_len;
#else /* defined(__arm__) || defined(__arm64__) */
    if (panic_info->mph_panic_log_offset != 0) {
        prior_debug_logsize = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
            panic_info->mph_panic_log_len + panic_info->mph_other_log_len;
    }
#endif /* defined(__arm__) || defined(__arm64__) */

    assert(prior_debug_logsize <= debug_buf_size);

    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        /* Open the file for output */
        if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_WRQ, NULL, 0, NULL)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "outproc(KDP_WRQ, NULL, 0, NULL) returned 0x%x\n", ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    }
    output_opened = true;

    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        const size_t aligned_corefile_header_size = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
        const size_t aligned_public_key_size = PUBLIC_KEY_RESERVED_LENGTH * 2;
#else
        const size_t aligned_public_key_size = 0;
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

        reserved_debug_logsize = prior_debug_logsize + KERN_COREDUMP_MAXDEBUGLOGSIZE;

        /* Space for file header, public key, panic log, core log */
        foffset = roundup(aligned_corefile_header_size + aligned_public_key_size + reserved_debug_logsize, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
        kdp_core_header->log_offset = aligned_corefile_header_size + aligned_public_key_size;

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
        /* Write the public key */
        ret = kern_dump_write_public_key(&outstate);
        if (KERN_SUCCESS != ret) {
            kern_coredump_log(NULL, "(do_kern_dump write public key) returned 0x%x\n", ret);
            dump_succeeded = FALSE;
            goto exit;
        }
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

        /* Seek to the calculated offset (we'll seek back later to flush the logs and header) */
        if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "(do_kern_dump seek begin) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }
    }

#if defined(__arm__) || defined(__arm64__)
    flush_mmu_tlb();
#endif

    kern_coredump_log(NULL, "%s", (kd_variant == KERN_DUMP_DISK) ? "Writing local cores...\n" :
        "Transmitting kernel state, please wait:\n");


#if defined(__x86_64__)
    if (((kd_variant == KERN_DUMP_STACKSHOT_DISK) || (kd_variant == KERN_DUMP_DISK)) && ((panic_stackshot_buf != 0) && (panic_stackshot_len != 0))) {
        bool should_skip = false;

        kern_coredump_log(NULL, "\nBeginning dump of kernel stackshot\n");

        ret = kdp_reset_output_vars(&outstate, panic_stackshot_len, true, &should_skip);

        if (ret != KERN_SUCCESS) {
            kern_coredump_log(NULL, "Failed to reset outstate for stackshot with len 0x%zx, returned 0x%x\n", panic_stackshot_len, ret);
            dump_succeeded = FALSE;
        } else if (!should_skip) {
            uint64_t compressed_stackshot_len = 0;
            if ((ret = kdp_core_output(&outstate, panic_stackshot_len, (void *)panic_stackshot_buf)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to write panic stackshot to file, kdp_coreoutput(outstate, %lu, %p) returned 0x%x\n",
                    panic_stackshot_len, (void *) panic_stackshot_buf, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kdp_core_output(&outstate, 0, NULL)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to flush stackshot data : kdp_core_output(%p, 0, NULL) returned 0x%x\n", &outstate, ret);
                dump_succeeded = FALSE;
            } else if ((ret = kern_dump_record_file(&outstate, "panic_stackshot.kcdata", foffset, &compressed_stackshot_len)) != KERN_SUCCESS) {
                kern_coredump_log(NULL, "Failed to record panic stackshot in corefile header, kern_dump_record_file returned 0x%x\n", ret);
                dump_succeeded = FALSE;
            } else {
                kern_coredump_log(NULL, "Recorded panic stackshot in corefile at offset 0x%llx, compressed to %llu bytes\n", foffset, compressed_stackshot_len);
                foffset = roundup((foffset + compressed_stackshot_len), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
                if ((ret = kern_dump_seek_to_next_file(&outstate, foffset)) != KERN_SUCCESS) {
                    kern_coredump_log(NULL, "Failed to seek to stackshot file offset 0x%llx, kern_dump_seek_to_next_file returned 0x%x\n", foffset, ret);
                    dump_succeeded = FALSE;
                }
            }
        } else {
            kern_coredump_log(NULL, "Skipping stackshot dump\n");
        }
    }
#endif

    if (kd_variant == KERN_DUMP_DISK) {
        /*
         * Dump co-processors as well; foffset will be overwritten with the
         * offset of the next location in the file to be written to.
         */
        if (kern_do_coredump(&outstate, FALSE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    } else if (kd_variant != KERN_DUMP_STACKSHOT_DISK) {
        /* Only the kernel */
        if (kern_do_coredump(&outstate, TRUE, foffset, &foffset) != 0) {
            dump_succeeded = FALSE;
        }
    }

    if (kd_variant == KERN_DUMP_DISK) {
        assert(reserved_debug_logsize != 0);
        size_t remaining_debug_logspace = reserved_debug_logsize;

        /* Write the debug log -- first seek to the end of the corefile header */
        foffset = kdp_core_header->log_offset;
        if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_SEEK, NULL, sizeof(foffset), &foffset)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "(do_kern_dump seek logfile) outproc(KDP_SEEK, NULL, %lu, %p) foffset = 0x%llx returned 0x%x\n",
                sizeof(foffset), &foffset, foffset, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        /* First flush the data from just the paniclog */
        size_t initial_log_length = 0;
#if defined(__arm__) || defined(__arm64__)
        initial_log_length = (panic_info->eph_panic_log_offset - sizeof(struct embedded_panic_header)) +
            panic_info->eph_panic_log_len;
#else
        if (panic_info->mph_panic_log_offset != 0) {
            initial_log_length = (panic_info->mph_panic_log_offset - sizeof(struct macos_panic_header)) +
                panic_info->mph_panic_log_len;
        }
#endif

        buf = debug_buf_base;
        if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, initial_log_length, buf)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "(do_kern_dump paniclog) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                initial_log_length, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        remaining_debug_logspace -= initial_log_length;

        /* Next include any log data from after the stackshot (the beginning of the 'other' log). */
#if defined(__arm__) || defined(__arm64__)
        buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->eph_other_log_offset);
#else
        /*
         * There may be no paniclog if we're doing a coredump after a call to Debugger() on x86
         * when debugger_is_panic was configured to FALSE via the boot-args. In that case just
         * start from where the debug buffer was when we began taking a coredump.
         */
        if (panic_info->mph_other_log_offset != 0) {
            buf = (char *)(((char *)panic_info) + (uintptr_t) panic_info->mph_other_log_offset);
        } else {
            buf = coredump_log_start;
        }
#endif
        assert(debug_buf_ptr >= buf);

        size_t other_log_length = debug_buf_ptr - buf;
        if (other_log_length > remaining_debug_logspace) {
            other_log_length = remaining_debug_logspace;
        }

        /* Write the coredump log */
        if ((ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_DATA, NULL, other_log_length, buf)) != KERN_SUCCESS) {
            kern_coredump_log(NULL, "(do_kern_dump coredump log) outproc(KDP_DATA, NULL, %lu, %p) returned 0x%x\n",
                other_log_length, buf, ret);
            dump_succeeded = FALSE;
            goto exit;
        }

        kdp_core_header->log_length = initial_log_length + other_log_length;
        kern_dump_update_header(&outstate);
    }

exit:
    /* close / last packet */
    if (output_opened && (ret = first_stage->kos_funcs.kosf_outproc(first_stage, KDP_EOF, NULL, 0, ((void *) 0))) != KERN_SUCCESS) {
        kern_coredump_log(NULL, "(do_kern_dump close) outproc(KDP_EOF, NULL, 0, 0) returned 0x%x\n", ret);
        dump_succeeded = FALSE;
    }

    /* If applicable, update the panic header and flush it so we update the CRC */
#if defined(__arm__) || defined(__arm64__)
    panic_info->eph_panic_flags |= (dump_succeeded ? EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
        EMBEDDED_PANIC_HEADER_FLAG_COREDUMP_FAILED);
    paniclog_flush();
#else
    if (panic_info->mph_panic_log_offset != 0) {
        panic_info->mph_panic_flags |= (dump_succeeded ? MACOS_PANIC_HEADER_FLAG_COREDUMP_COMPLETE :
            MACOS_PANIC_HEADER_FLAG_COREDUMP_FAILED);
        paniclog_flush();
    }
#endif

    return dump_succeeded ? 0 : -1;
}

boolean_t
dumped_kernel_core(void)
{
    return kern_dump_successful;
}

int
kern_dump(enum kern_dump_type kd_variant)
{
    static boolean_t local_dump_in_progress = FALSE, dumped_local = FALSE;
    int ret = -1;
#if KASAN
    kasan_kdp_disable();
#endif
    if ((kd_variant == KERN_DUMP_DISK) || (kd_variant == KERN_DUMP_STACKSHOT_DISK)) {
        if (dumped_local) {
            return 0;
        }
        if (local_dump_in_progress) {
            return -1;
        }
        local_dump_in_progress = TRUE;
#if defined(__arm__) || defined(__arm64__)
        shmem_mark_as_busy();
#endif
        ret = do_kern_dump(kd_variant);
        if (ret == 0) {
            dumped_local = TRUE;
            kern_dump_successful = TRUE;
            local_dump_in_progress = FALSE;
        }

        return ret;
#if defined(__arm__) || defined(__arm64__)
    } else if (kd_variant == KERN_DUMP_HW_SHMEM_DBG) {
        ret = do_kern_dump(kd_variant);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
#endif
    } else {
        ret = do_kern_dump(kd_variant);
        if (ret == 0) {
            kern_dump_successful = TRUE;
        }
        return ret;
    }
}

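/*
 * One-time setup for the statically allocated output stages. The disk and
 * (when configured) AEA encryption stages are only zeroed here; they are
 * fully initialized later, when the corefile becomes available or a public
 * key is installed, respectively.
 */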
static kern_return_t
kdp_core_init_output_stages(void)
{
    kern_return_t ret = KERN_SUCCESS;

    // We only zero-out the disk stage. It will be initialized
    // later on when the corefile is initialized.
    bzero(&disk_output_stage, sizeof(disk_output_stage));

    bzero(&zlib_output_stage, sizeof(zlib_output_stage));
    ret = zlib_stage_initialize(&zlib_output_stage);
    if (KERN_SUCCESS != ret) {
        return ret;
    }

    bzero(&buffer_output_stage, sizeof(buffer_output_stage));
    ret = buffer_stage_initialize(&buffer_output_stage, kdp_crashdump_pkt_size);
    if (KERN_SUCCESS != ret) {
        return ret;
    }

    bzero(&net_output_stage, sizeof(net_output_stage));
    ret = net_stage_initialize(&net_output_stage);
    if (KERN_SUCCESS != ret) {
        return ret;
    }

    bzero(&progress_notify_output_stage, sizeof(progress_notify_output_stage));
    ret = progress_notify_stage_initialize(&progress_notify_output_stage);
    if (KERN_SUCCESS != ret) {
        return ret;
    }

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
    // We only zero-out the AEA stage. It will be initialized
    // later on, if it's supported and needed.
    bzero(&aea_output_stage, sizeof(aea_output_stage));
    aea_stage_monitor_availability();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

#if defined(__arm__) || defined(__arm64__)
    bzero(&shmem_output_stage, sizeof(shmem_output_stage));
    if (PE_consistent_debug_enabled() && PE_i_can_has_debugger(NULL)) {
        ret = shmem_stage_initialize(&shmem_output_stage);
        if (KERN_SUCCESS != ret) {
            return ret;
        }
    }
#endif /* defined(__arm__) || defined(__arm64__) */

#if defined(__arm64__)
    bzero(&memory_backing_aware_buffer_output_stage, sizeof(memory_backing_aware_buffer_output_stage));
    ret = memory_backing_aware_buffer_stage_initialize(&memory_backing_aware_buffer_output_stage);
    if (KERN_SUCCESS != ret) {
        return ret;
    }
#endif /* defined(__arm64__) */

    return ret;
}

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION

static bool
kern_dump_should_enforce_encryption(void)
{
    static int enforce_encryption = -1;

    // Only check once
    if (enforce_encryption == -1) {
        uint32_t coredump_encryption_flags = 0;

        // When set, the boot-arg is the sole decider
        if (!kernel_debugging_restricted() &&
            PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags))) {
            enforce_encryption = (coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_ENFORCEMENT) != 0 ? 1 : 0;
        } else {
            enforce_encryption = 0;
        }
    }

    return enforce_encryption != 0;
}

static bool
kern_dump_is_encryption_available(void)
{
    // Default to feature enabled unless boot-arg says otherwise
    uint32_t coredump_encryption_flags = COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY;

    if (!kernel_debugging_restricted()) {
        PE_parse_boot_argn("coredump_encryption", &coredump_encryption_flags, sizeof(coredump_encryption_flags));
    }

    if ((coredump_encryption_flags & COREDUMP_ENCRYPTION_OVERRIDES_AVAILABILITY) == 0) {
        return false;
    }

    return aea_stage_is_available();
}

/*
 * Initialize (or de-initialize) the encryption stage. This is done in a way such that if initializing the
 * encryption stage with a new key fails, then the existing encryption stage is left untouched. Once
 * the new stage is initialized, the old stage is uninitialized.
 *
 * This function is called whenever we have a new public key (whether from someone calling our sysctl, or because
 * we read it out of a corefile), or when encryption becomes available.
 *
 * Parameters:
 * - public_key: The public key to use when initializing the encryption stage. Can be NULL to indicate that
 *   the encryption stage should be de-initialized.
 * - public_key_size: The size of the given public key.
 */
static kern_return_t
kdp_core_init_encryption_stage(void *public_key, size_t public_key_size)
{
    kern_return_t ret = KERN_SUCCESS;
    struct kdp_output_stage new_encryption_stage = {};
    struct kdp_output_stage old_encryption_stage = {};

    lck_mtx_assert(kdp_core_encryption_stage_lock, LCK_MTX_ASSERT_OWNED);

    bzero(&new_encryption_stage, sizeof(new_encryption_stage));

    if (public_key && kern_dump_is_encryption_available()) {
        ret = aea_stage_initialize(&new_encryption_stage, public_key, public_key_size);
        if (KERN_SUCCESS != ret) {
            printf("(kdp_core_init_encryption_stage) Failed to initialize the encryption stage. Error 0x%x\n", ret);
            return ret;
        }
    }

    bcopy(&aea_output_stage, &old_encryption_stage, sizeof(aea_output_stage));

    bcopy(&new_encryption_stage, &aea_output_stage, sizeof(new_encryption_stage));

    if (old_encryption_stage.kos_initialized && old_encryption_stage.kos_funcs.kosf_free) {
        old_encryption_stage.kos_funcs.kosf_free(&old_encryption_stage);
    }

    return KERN_SUCCESS;
}

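/*
 * Key-rotation flow, in the order performed below: (1) re-initialize the
 * in-memory encryption stage with the new key, bailing out cleanly on
 * failure; (2) swap the cached key pointer and header fields, handing the old
 * key back to the caller to free; (3) update the on-disk corefile header,
 * reusing a compatible existing v2 header when one is present; and (4) write
 * the new "next" public key into the reserved key area, zeroing the current
 * key slot when a brand-new header was written.
 */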
kern_return_t
kdp_core_handle_new_encryption_key(IOCoreFileAccessCallback access_data, void *access_context, void *recipient_context)
{
    kern_return_t ret = KERN_SUCCESS;
    struct kdp_core_encryption_key_descriptor *key_descriptor = (struct kdp_core_encryption_key_descriptor *) recipient_context;
    void *old_public_key = NULL;
    size_t old_public_key_size = 0;

    if (!key_descriptor) {
        return kIOReturnBadArgument;
    }

    lck_mtx_lock(kdp_core_encryption_stage_lock);
    kdp_core_is_initializing_encryption_stage = true;

    do {
        // Do the risky part first, and bail out cleanly if it fails
        ret = kdp_core_init_encryption_stage(key_descriptor->kcekd_key, key_descriptor->kcekd_size);
        if (ret != KERN_SUCCESS) {
            printf("kdp_core_handle_new_encryption_key failed to re-initialize encryption stage. Error 0x%x\n", ret);
            break;
        }

        // The rest of this function should technically never fail

        old_public_key = kdp_core_public_key;
        old_public_key_size = kdp_core_header->pub_key_length;

        kdp_core_public_key = key_descriptor->kcekd_key;
        kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
        kdp_core_header->flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_EXISTING_COREFILE_KEY_FORMAT_MASK;
        if (key_descriptor->kcekd_key) {
            kdp_core_header->flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
            kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(key_descriptor->kcekd_format);
            kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
            kdp_core_header->pub_key_length = key_descriptor->kcekd_size;
        } else {
            kdp_core_header->pub_key_offset = 0;
            kdp_core_header->pub_key_length = 0;
        }

        /*
         * Return the old key to the caller to free
         */
        key_descriptor->kcekd_key = old_public_key;
        key_descriptor->kcekd_size = (uint16_t)old_public_key_size;

        // If this stuff fails, we have bigger problems
        struct mach_core_fileheader_v2 existing_header;
        bool used_existing_header = false;
        ret = access_data(access_context, FALSE, 0, sizeof(existing_header), &existing_header);
        if (ret != KERN_SUCCESS) {
            printf("kdp_core_handle_new_encryption_key failed to read the existing corefile header. Error 0x%x\n", ret);
            break;
        }

        if (existing_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
            && existing_header.version == 2
            && (existing_header.pub_key_length == 0
            || kdp_core_header->pub_key_length == 0
            || existing_header.pub_key_length == kdp_core_header->pub_key_length)) {
            used_existing_header = true;
            existing_header.flags &= ~MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

            if (kdp_core_public_key) {
                existing_header.flags |= key_descriptor->kcekd_format & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;

                if (existing_header.pub_key_offset == 0) {
                    existing_header.pub_key_offset = kdp_core_header->pub_key_offset;
                    existing_header.pub_key_length = kdp_core_header->pub_key_length;
                }
            }

            ret = access_data(access_context, TRUE, 0, sizeof(existing_header), &existing_header);
            if (ret != KERN_SUCCESS) {
                printf("kdp_core_handle_new_encryption_key failed to update the existing corefile header. Error 0x%x\n", ret);
                break;
            }
        } else {
            ret = access_data(access_context, TRUE, 0, sizeof(struct mach_core_fileheader_v2), kdp_core_header);
            if (ret != KERN_SUCCESS) {
                printf("kdp_core_handle_new_encryption_key failed to write the corefile header. Error 0x%x\n", ret);
                break;
            }
        }

        if (kdp_core_header->pub_key_length) {
            uint64_t offset = used_existing_header ? existing_header.pub_key_offset : kdp_core_header->pub_key_offset;
            ret = access_data(access_context, TRUE, offset + PUBLIC_KEY_RESERVED_LENGTH, kdp_core_header->pub_key_length, kdp_core_public_key);
            if (ret != KERN_SUCCESS) {
                printf("kdp_core_handle_new_encryption_key failed to write the next public key. Error 0x%x\n", ret);
                break;
            }

            if (!used_existing_header) {
                /*
                 * Everything that happens here is optional; it's not the end of the world if it
                 * fails, so we don't return any errors.
                 *
                 * Since we're writing out a completely new header, we make sure to zero-out the
                 * region that's reserved for the public key. This allows consumers of the corefile
                 * to know for sure that this corefile is not encrypted (yet). Once we actually
                 * write out a corefile, we'll overwrite this region with the key that we ended up
                 * using at the time.
                 *
                 * If we fail to zero-out this region, consumers would read garbage data and properly
                 * fail to interpret it as a public key, which is why it is OK for us to fail here
                 * (it's hard to interpret garbage data as a valid key, and even then, they wouldn't
                 * find a matching private key anyway).
                 */
                void *empty_key = NULL;
                kern_return_t temp_ret = KERN_SUCCESS;

                empty_key = kalloc_data(PUBLIC_KEY_RESERVED_LENGTH,
                    Z_WAITOK | Z_ZERO | Z_NOFAIL);

                temp_ret = access_data(access_context, TRUE, offset, PUBLIC_KEY_RESERVED_LENGTH, empty_key);
                kfree_data(empty_key, PUBLIC_KEY_RESERVED_LENGTH);

                if (temp_ret != KERN_SUCCESS) {
                    printf("kdp_core_handle_new_encryption_key failed to zero-out the public key region. Error 0x%x\n", temp_ret);
                    break;
                }
            }
        }
    } while (0);

    kdp_core_is_initializing_encryption_stage = false;
    lck_mtx_unlock(kdp_core_encryption_stage_lock);

    return ret;
}

kern_return_t
kdp_core_handle_encryption_available(void)
{
    kern_return_t ret;

    lck_mtx_lock(kdp_core_encryption_stage_lock);
    kdp_core_is_initializing_encryption_stage = true;

    ret = kdp_core_init_encryption_stage(kdp_core_public_key, kdp_core_header->pub_key_length);

    kdp_core_is_initializing_encryption_stage = false;
    lck_mtx_unlock(kdp_core_encryption_stage_lock);

    return ret;
}

#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

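/*
 * Called by the polled-I/O layer once the kernel corefile becomes accessible:
 * initializes the disk output stage and, when encryption is configured and no
 * key has been installed yet, tries to adopt the "next" public key stashed in
 * the existing corefile's v2 header.
 */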
kern_return_t
kdp_core_polled_io_polled_file_available(IOCoreFileAccessCallback access_data, void *access_context, __unused void *recipient_context)
{
    kern_return_t ret = KERN_SUCCESS;

    lck_mtx_lock(kdp_core_disk_stage_lock);
    kdp_core_is_initializing_disk_stage = true;

    ret = disk_stage_initialize(&disk_output_stage);

    kdp_core_is_initializing_disk_stage = false;
    lck_mtx_unlock(kdp_core_disk_stage_lock);

    if (KERN_SUCCESS != ret) {
        return ret;
    }

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
    // If someone has already provided a new public key,
    // there's no sense in reading the old one from the corefile.
    if (kdp_core_public_key != NULL) {
        return KERN_SUCCESS;
    }

    // The kernel corefile is now available. Let's try to retrieve the public key from its
    // header (if available and supported).

    // First let's read the corefile header itself
    struct mach_core_fileheader_v2 temp_header = {};
    ret = access_data(access_context, FALSE, 0, sizeof(temp_header), &temp_header);
    if (KERN_SUCCESS != ret) {
        printf("kdp_core_polled_io_polled_file_available failed to read corefile header. Error 0x%x\n", ret);
        return ret;
    }

    // Check whether the corefile header is initialized, whether it's initialized to values
    // that we support (for backwards and forwards compatibility), and whether the header
    // indicates that the corefile has a public key stashed inside of it.
    if (temp_header.signature == MACH_CORE_FILEHEADER_V2_SIGNATURE
        && temp_header.version == 2
        && temp_header.pub_key_offset != 0
        && temp_header.pub_key_length != 0
        /* Future-proofing: make sure it's the key format that we support */
        && (temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK) == MACH_CORE_FILEHEADER_V2_FLAG_NEXT_COREFILE_KEY_FORMAT_NIST_P256
        /* Some extra sanity checks; these are not strictly necessary */
        && temp_header.pub_key_length <= 4096
        && temp_header.pub_key_offset < 65535) {
        // The corefile header is properly initialized, is supported, and contains a public key.
        // Let's adopt that public key for our encryption needs
        void *public_key = NULL;

        public_key = kalloc_data(temp_header.pub_key_length,
            Z_ZERO | Z_WAITOK | Z_NOFAIL);

        // Read the public key from the corefile. Note that the key we're trying to adopt is the
        // "next" key, which is stored PUBLIC_KEY_RESERVED_LENGTH bytes after the current public key.
        ret = access_data(access_context, FALSE, temp_header.pub_key_offset + PUBLIC_KEY_RESERVED_LENGTH, temp_header.pub_key_length, public_key);
        if (KERN_SUCCESS != ret) {
            printf("kdp_core_polled_io_polled_file_available failed to read the public key. Error 0x%x\n", ret);
            kfree_data(public_key, temp_header.pub_key_length);
            return ret;
        }

        lck_mtx_lock(kdp_core_encryption_stage_lock);
        kdp_core_is_initializing_encryption_stage = true;

        ret = kdp_core_init_encryption_stage(public_key, temp_header.pub_key_length);
        if (KERN_SUCCESS == ret) {
            kdp_core_header->flags |= temp_header.flags & MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_COREFILE_KEY_FORMAT_MASK;
            kdp_core_header->flags |= MACH_CORE_FILEHEADER_V2_FLAGS_NEXT_KEY_FORMAT_TO_KEY_FORMAT(temp_header.flags);
            kdp_core_header->pub_key_offset = roundup(kdp_core_header_size, KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
            kdp_core_header->pub_key_length = temp_header.pub_key_length;
            kdp_core_public_key = public_key;
        }

        kdp_core_is_initializing_encryption_stage = false;
        lck_mtx_unlock(kdp_core_encryption_stage_lock);
    }
#else
#pragma unused(access_data, access_context)
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

    return ret;
}

kern_return_t
kdp_core_polled_io_polled_file_unavailable(void)
{
    lck_mtx_lock(kdp_core_disk_stage_lock);
    kdp_core_is_initializing_disk_stage = true;

    if (disk_output_stage.kos_initialized && disk_output_stage.kos_funcs.kosf_free) {
        disk_output_stage.kos_funcs.kosf_free(&disk_output_stage);
    }

    kdp_core_is_initializing_disk_stage = false;
    lck_mtx_unlock(kdp_core_disk_stage_lock);

    return KERN_SUCCESS;
}

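/*
 * Boot-time initialization, matching the overview at the top of this file:
 * set up the output stages, allocate and tag the persistent corefile header
 * (v2 signature plus space for KERN_COREDUMP_MAX_CORES per-core entries),
 * create the stage-initialization locks, and register the kern_dump_save_*
 * callbacks with the kernel coredump machinery.
 */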
void
kdp_core_init(void)
{
    kern_return_t kr;
    kern_coredump_callback_config core_config = { };

    /* Initialize output stages */
    kr = kdp_core_init_output_stages();
    assert(KERN_SUCCESS == kr);

    kmem_alloc(kernel_map, (vm_offset_t*)&kdp_core_header,
        kdp_core_header_size,
        KMA_NOFAIL | KMA_ZERO | KMA_PERMANENT | KMA_KOBJECT | KMA_DATA,
        VM_KERN_MEMORY_DIAG);

    kdp_core_header->signature = MACH_CORE_FILEHEADER_V2_SIGNATURE;
    kdp_core_header->version = 2;

    kdp_core_initialization_lock_group = lck_grp_alloc_init("KDPCoreStageInit", LCK_GRP_ATTR_NULL);
    kdp_core_disk_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

#ifdef CONFIG_KDP_COREDUMP_ENCRYPTION
    kdp_core_encryption_stage_lock = lck_mtx_alloc_init(kdp_core_initialization_lock_group, LCK_ATTR_NULL);

    (void) kern_dump_should_enforce_encryption();
#endif // CONFIG_KDP_COREDUMP_ENCRYPTION

    core_config.kcc_coredump_init = NULL; /* TODO: consider doing mmu flush from an init function */
    core_config.kcc_coredump_get_summary = kern_dump_save_summary;
    core_config.kcc_coredump_save_segment_descriptions = kern_dump_save_seg_descriptions;
    core_config.kcc_coredump_save_thread_state = kern_dump_save_thread_state;
    core_config.kcc_coredump_save_sw_vers_detail = kern_dump_save_sw_vers_detail;
    core_config.kcc_coredump_save_segment_data = kern_dump_save_segment_data;

    kr = kern_register_xnu_coredump_helper(&core_config);
    assert(KERN_SUCCESS == kr);
}

#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */