xref: /xnu-8020.140.41/osfmk/kdp/processor_core.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kdp/kdp_core.h>
30 #include <kdp/processor_core.h>
31 #include <kern/assert.h>
32 #include <kern/zalloc.h>
33 #include <libkern/kernel_mach_header.h>
34 #include <libkern/OSAtomic.h>
35 #include <libsa/types.h>
36 #include <pexpert/pexpert.h>
37 
38 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
39 
/*
 * Round x up to the next multiple of y.
 * NOTE: both arguments are evaluated multiple times — avoid side effects.
 */
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
	                (x) : ((x) + ((y) - ((x) % (y)))))
42 
/* data_owner string LLDB matches against to find the main bin spec LC_NOTE */
#define DATA_OWNER_MAIN_BIN_SPEC "main bin spec"
/*
 * Format of the "main bin spec" LC_NOTE payload as expected by LLDB
 */
typedef struct {
	uint32_t version;       // currently 1
	uint32_t type;          // 0 == unspecified, 1 == kernel, 2 == user process, 3 == standalone (ie FW)
	uint64_t address;       // UINT64_MAX if address not specified
	uuid_t   uuid;          // all zero's if uuid not specified
	uint32_t log2_pagesize; // process page size in log base 2, e.g. 4k pages are 12. 0 for unspecified
	uint32_t unused;        // leave set to 0
} __attribute__((packed)) main_bin_spec;
#define MAIN_BIN_SPEC_VERSION 1           /* value for main_bin_spec.version */
#define MAIN_BIN_SPEC_TYPE_KERNEL 1       /* main_bin_spec.type for the xnu core */
#define MAIN_BIN_SPEC_TYPE_STANDALONE 3   /* main_bin_spec.type for co-processor cores */
58 
/* data_owner string for the legacy version-string LC_NOTE */
#define DATA_OWNER_LEGACY_BIN_SPEC "kern ver str"
/*
 * Format of the legacy bin spec (LC_IDENT-like) LC_NOTE payload as expected by LLDB
 */
typedef struct {
	uint32_t version; // currently 1
	char version_string[KERN_COREDUMP_VERSIONSTRINGMAXSIZE]; // NUL-terminated, zero padded to full size
} __attribute__((packed)) legacy_bin_spec;
#define LEGACY_BIN_SPEC_VERSION 1 /* value for legacy_bin_spec.version */
68 
69 /*
70  * The processor_core_context structure describes the current
71  * corefile that's being generated. It also includes a pointer
72  * to the core_outvars which is used by the KDP code for context
73  * about the specific output mechanism being used.
74  *
75  * We include *remaining variables to catch inconsistencies / bugs
76  * in the co-processor coredump callbacks.
77  */
typedef struct {
	struct kdp_core_out_vars * core_outvars;     /* Output procedure info (see kdp_out_stage.h) */
	kern_coredump_callback_config *core_config;  /* Information about core currently being dumped */
	void *core_refcon;                           /* Reference constant associated with the coredump helper */
	boolean_t core_should_be_skipped;            /* Indicates whether this specific core should not be dumped */
	boolean_t core_is64bit;                      /* Bitness of CPU */
	boolean_t core_isxnu;                        /* Indicates whether this core is the currently running xnu */
	uint32_t core_mh_magic;                      /* Magic for mach header */
	cpu_type_t core_cpu_type;                    /* CPU type for mach header */
	cpu_subtype_t core_cpu_subtype;              /* CPU subtype for mach header */
	uint64_t core_file_length;                   /* Overall corefile length including any zero padding */
	uint64_t core_file_length_compressed;        /* File length after compression */
	uint64_t core_segment_count;                 /* Number of LC_SEGMENTs in the core currently being dumped */
	uint64_t core_segments_remaining;            /* Number of LC_SEGMENTs that have not been added to the header */
	uint64_t core_segment_byte_total;            /* Sum of all the data from the LC_SEGMENTS in the core */
	uint64_t core_segment_bytes_remaining;       /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
	uint64_t core_thread_count;                  /* Number of LC_THREADs to be included */
	uint64_t core_threads_remaining;             /* Number of LC_THREADs that have yet to be included */
	uint64_t core_thread_state_size;             /* Size of each LC_THREAD */
	uint64_t core_note_count;                    /* Number of LC_NOTEs to be included */
	uint64_t core_notes_remaining;               /* Number of LC_NOTEs that have not been added to the header */
	uint64_t core_note_bytes_total;              /* Sum of all data from the LC_NOTE segments in the core */
	uint64_t core_note_bytes_remaining;          /* Quantity of data remaining from LC_NOTEs that have yet to be added */
	uint64_t core_cur_hoffset;                   /* Current offset in this core's header (load-command area) */
	uint64_t core_cur_foffset;                   /* Current offset in this core's overall file (segment/note data area) */
	uint64_t core_header_size;                   /* Size of this core's header */
	uint64_t core_total_bytes;                   /* Total amount of data to be included in this core (excluding zero fill) */
} processor_core_context;
106 
107 /*
108  * The kern_coredump_core structure describes a core that has been
109  * registered for use by the coredump mechanism.
110  */
struct kern_coredump_core {
	struct kern_coredump_core *kcc_next;             /* Next processor to dump */
	void *kcc_refcon;                                /* Reference constant to be passed to callbacks */
	char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor */
	boolean_t kcc_is64bit;                           /* Processor bitness */
	uint32_t kcc_mh_magic;                           /* Magic for mach header */
	cpu_type_t kcc_cpu_type;                         /* CPU type for mach header */
	cpu_subtype_t kcc_cpu_subtype;                   /* CPU subtype for mach header */
	kern_coredump_callback_config kcc_cb;            /* Registered processor callbacks for coredump */
} * kern_coredump_core_list = NULL;                      /* Head of co-processor helper list; prepends via compare-and-swap */

/* Signature of the legacy (deprecated) software-version callback */
typedef kern_return_t (*legacy_sw_vers_registered_cb)(void *refcon, core_save_sw_vers_cb callback, void *context);

/* Count of all registered helpers (co-processors plus xnu); updated atomically */
uint32_t coredump_registered_count = 0;

/* Helper used to dump the running xnu kernel itself; kept out of kern_coredump_core_list */
struct kern_coredump_core *kernel_helper = NULL;
127 
128 static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t xnu_callback,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)129 kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
130     void *refcon, const char *core_description, boolean_t xnu_callback, boolean_t is64bit,
131     uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
132 {
133 	struct kern_coredump_core *core_helper = NULL;
134 	kern_coredump_callback_config *core_callbacks = NULL;
135 
136 	if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) {
137 		return NULL;
138 	}
139 	if (kc_callbacks == NULL) {
140 		return NULL;
141 	}
142 	;
143 	if (core_description == NULL) {
144 		return NULL;
145 	}
146 
147 	if (kc_callbacks->kcc_coredump_get_summary == NULL ||
148 	    kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
149 	    kc_callbacks->kcc_coredump_save_segment_data == NULL ||
150 	    kc_callbacks->kcc_coredump_save_thread_state == NULL) {
151 		return NULL;
152 	}
153 
154 #pragma clang diagnostic push
155 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
156 	legacy_sw_vers_registered_cb legacy_vers_callback = kc_callbacks->kcc_coredump_save_sw_vers;
157 #pragma clang diagnostic pop
158 
159 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
160 		if (legacy_vers_callback == NULL &&
161 		    kc_callbacks->kcc_coredump_save_sw_vers_detail == NULL) {
162 			return NULL;
163 		}
164 	} else {
165 		if (legacy_vers_callback == NULL) {
166 			return NULL;
167 		}
168 	}
169 
170 
171 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
172 		/* Either all note related callbacks should be set or none should be set */
173 		if ((kc_callbacks->kcc_coredump_save_note_summary == NULL) != (kc_callbacks->kcc_coredump_save_note_descriptions == NULL)) {
174 			return NULL;
175 		}
176 		if ((kc_callbacks->kcc_coredump_save_note_descriptions == NULL) != (kc_callbacks->kcc_coredump_save_note_data == NULL)) {
177 			return NULL;
178 		}
179 	}
180 
181 
182 #if !defined(__LP64__)
183 	/* We don't support generating 64-bit cores on 32-bit platforms */
184 	if (is64bit) {
185 		return NULL;
186 	}
187 #endif
188 
189 	core_helper = zalloc_permanent_type(struct kern_coredump_core);
190 	core_helper->kcc_next = NULL;
191 	core_helper->kcc_refcon = refcon;
192 	if (xnu_callback) {
193 		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
194 	} else {
195 		/* Make sure there's room for the -coproc suffix (16 - NULL char - strlen(-coproc)) */
196 		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.8s-coproc", core_description);
197 	}
198 	core_helper->kcc_is64bit = is64bit;
199 	core_helper->kcc_mh_magic = mh_magic;
200 	core_helper->kcc_cpu_type = cpu_type;
201 	core_helper->kcc_cpu_subtype = cpu_subtype;
202 	core_callbacks = &core_helper->kcc_cb;
203 
204 	core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
205 	core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
206 	core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
207 	core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
208 	core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
209 #pragma clang diagnostic push
210 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
211 	core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;
212 #pragma clang diagnostic pop
213 
214 
215 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
216 		core_callbacks->kcc_coredump_save_note_summary = kc_callbacks->kcc_coredump_save_note_summary;
217 		core_callbacks->kcc_coredump_save_note_descriptions = kc_callbacks->kcc_coredump_save_note_descriptions;
218 		core_callbacks->kcc_coredump_save_note_data = kc_callbacks->kcc_coredump_save_note_data;
219 		core_callbacks->kcc_coredump_save_sw_vers_detail = kc_callbacks->kcc_coredump_save_sw_vers_detail;
220 	}
221 
222 	if (xnu_callback) {
223 		assert(kernel_helper == NULL);
224 		kernel_helper = core_helper;
225 	} else {
226 		do {
227 			core_helper->kcc_next = kern_coredump_core_list;
228 		} while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
229 	}
230 
231 	OSAddAtomic(1, &coredump_registered_count);
232 	kprintf("Registered coredump handler for %s\n", core_description);
233 
234 	return core_helper;
235 }
236 
237 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)238 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
239     void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
240     cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
241 {
242 	if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) {
243 		return KERN_RESOURCE_SHORTAGE;
244 	}
245 
246 	if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, FALSE,
247 	    is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) {
248 		return KERN_INVALID_ARGUMENT;
249 	}
250 
251 	return KERN_SUCCESS;
252 }
253 
254 kern_return_t
kern_register_xnu_coredump_helper(kern_coredump_callback_config * kc_callbacks)255 kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
256 {
257 #if defined(__LP64__)
258 	boolean_t is64bit = TRUE;
259 #else
260 	boolean_t is64bit = FALSE;
261 #endif
262 
263 	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", TRUE, is64bit,
264 	    _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
265 		return KERN_FAILURE;
266 	}
267 
268 	return KERN_SUCCESS;
269 }
270 
271 /*
272  * Save LC_NOTE metadata about the core we are going to write before we write the mach header
273  */
274 static int
coredump_save_note_summary(uint64_t core_note_count,uint64_t core_note_byte_count,void * context)275 coredump_save_note_summary(uint64_t core_note_count, uint64_t core_note_byte_count, void *context)
276 {
277 	processor_core_context *core_context = (processor_core_context *)context;
278 
279 	if (!core_note_count || !core_note_byte_count || !context) {
280 		return KERN_INVALID_ARGUMENT;
281 	}
282 
283 	core_context->core_note_count = core_context->core_notes_remaining = core_note_count;
284 	core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = core_note_byte_count;
285 
286 	return KERN_SUCCESS;
287 }
288 
289 /*
290  * Save metadata about the core we're about to write, write out the mach header
291  */
292 static int
coredump_save_summary(uint64_t core_segment_count,uint64_t core_byte_count,uint64_t thread_count,uint64_t thread_state_size,__unused uint64_t misc_bytes_count,void * context)293 coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
294     uint64_t thread_count, uint64_t thread_state_size,
295     __unused uint64_t misc_bytes_count, void *context)
296 {
297 	processor_core_context *core_context = (processor_core_context *)context;
298 	uint32_t sizeofcmds = 0, numcmds = 0;
299 	bool should_skip = false;
300 	int ret = 0;
301 
302 	if (!core_segment_count || !core_byte_count || !thread_count || !thread_state_size
303 	    || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) {
304 		return KERN_INVALID_ARGUMENT;
305 	}
306 
307 	/* Initialize core_context */
308 	core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
309 	core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
310 	core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
311 	core_context->core_thread_state_size = thread_state_size;
312 
313 	/* Account for the LC_NOTE needed to store version/load information */
314 	core_context->core_note_count = core_context->core_notes_remaining = (core_context->core_note_count + 1);
315 	size_t vers_note_length = sizeof(main_bin_spec);
316 	if (core_context->core_config->kcc_coredump_save_sw_vers_detail == NULL) {
317 		vers_note_length = sizeof(legacy_bin_spec);
318 	}
319 	core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = (core_context->core_note_bytes_total + vers_note_length);
320 
321 #if defined(__LP64__)
322 	if (core_context->core_is64bit) {
323 		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
324 		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
325 		    (core_context->core_note_count * sizeof(struct note_command)));
326 		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
327 	} else
328 #endif /* defined(__LP64__) */
329 	{
330 		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
331 		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
332 		    (core_context->core_note_count * sizeof(struct note_command)));
333 		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
334 	}
335 
336 
337 	core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
338 	core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
339 	core_context->core_cur_foffset = round_page(core_context->core_header_size);
340 
341 	numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + core_context->core_note_count);
342 
343 	/*
344 	 * Reset the zstream and other output context before writing any data out. We do this here
345 	 * to update the total file length on the outvars before we start writing out.
346 	 */
347 	ret = kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length, true, &should_skip);
348 	if (ret != KERN_SUCCESS) {
349 		kern_coredump_log(context, "%s() : failed to reset the out vars : kdp_reset_output_vars(%p, %llu, true, %p) returned error 0x%x\n",
350 		    __func__, core_context->core_outvars, core_context->core_file_length, &should_skip, ret);
351 		return ret;
352 	}
353 
354 	if (should_skip) {
355 		core_context->core_should_be_skipped = TRUE;
356 		return KERN_SUCCESS;
357 	}
358 
359 	/* Construct core file header */
360 #if defined(__LP64__)
361 	if (core_context->core_is64bit) {
362 		struct mach_header_64 core_header = { };
363 
364 		core_header.magic = core_context->core_mh_magic;
365 		core_header.cputype = core_context->core_cpu_type;
366 		core_header.cpusubtype = core_context->core_cpu_subtype;
367 		core_header.filetype = MH_CORE;
368 		core_header.ncmds = numcmds;
369 		core_header.sizeofcmds = sizeofcmds;
370 		core_header.flags = 0;
371 
372 		/* Send the core_header to the output procedure */
373 		ret =  kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
374 		if (ret != KERN_SUCCESS) {
375 			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
376 			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
377 			return ret;
378 		}
379 
380 		core_context->core_cur_hoffset += sizeof(core_header);
381 	} else
382 #endif /* defined(__LP64__) */
383 	{
384 		struct mach_header core_header = { };
385 
386 		core_header.magic = core_context->core_mh_magic;
387 		core_header.cputype = core_context->core_cpu_type;
388 		core_header.cpusubtype = core_context->core_cpu_subtype;
389 		core_header.filetype = MH_CORE;
390 		core_header.ncmds = numcmds;
391 		core_header.sizeofcmds = sizeofcmds;
392 		core_header.flags = 0;
393 
394 		/* Send the core_header to the output procedure */
395 		ret =  kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
396 		if (ret != KERN_SUCCESS) {
397 			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
398 			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
399 			return ret;
400 		}
401 
402 		core_context->core_cur_hoffset += sizeof(core_header);
403 	}
404 
405 	return KERN_SUCCESS;
406 }
407 
408 static void
coredump_set_segment_name(char * seg_name,uint64_t seg_start)409 coredump_set_segment_name(char *seg_name, uint64_t seg_start)
410 {
411 	if (debug_is_in_phys_carveout(seg_start)) {
412 		strlcpy(seg_name, "pcarveout", 16);
413 		return;
414 	}
415 	if (debug_is_in_phys_carveout_metadata(seg_start)) {
416 		strlcpy(seg_name, "pcarveout_md", 16);
417 		return;
418 	}
419 	seg_name[0] = 0;
420 }
421 
422 /*
423  * Construct a segment command for the specified segment.
424  */
425 static int
coredump_save_segment_descriptions(uint64_t seg_start,uint64_t seg_end,void * context)426 coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
427     void *context)
428 {
429 	processor_core_context *core_context = (processor_core_context *)context;
430 	int ret;
431 	uint64_t size = seg_end - seg_start;
432 
433 	if (seg_end <= seg_start) {
434 		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
435 		    __func__, seg_start, seg_end, context, seg_start, seg_end);
436 		return KERN_INVALID_ARGUMENT;
437 	}
438 
439 	if (core_context->core_segments_remaining == 0) {
440 		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n",
441 		    __func__, seg_start, seg_end, context, core_context->core_segment_count);
442 		return KERN_INVALID_ARGUMENT;
443 	}
444 
445 	/* Construct segment command */
446 #if defined(__LP64__)
447 	if (core_context->core_is64bit) {
448 		struct segment_command_64 seg_command = { };
449 
450 		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
451 			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
452 			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
453 			return KERN_NO_SPACE;
454 		}
455 
456 		seg_command.cmd = LC_SEGMENT_64;
457 		seg_command.cmdsize = sizeof(seg_command);
458 		coredump_set_segment_name(&seg_command.segname[0], seg_start);
459 		seg_command.vmaddr = seg_start;
460 		seg_command.vmsize = size;
461 		seg_command.fileoff = core_context->core_cur_foffset;
462 		seg_command.filesize = size;
463 		seg_command.maxprot = VM_PROT_READ;
464 		seg_command.initprot = VM_PROT_READ;
465 
466 		/* Flush new command to output */
467 		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
468 		if (ret != KERN_SUCCESS) {
469 			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. kdp_core_output(%p, %lu, %p) returned error %d\n",
470 			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
471 			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
472 			return ret;
473 		}
474 
475 		core_context->core_cur_hoffset += sizeof(seg_command);
476 	} else
477 #endif /* defined(__LP64__) */
478 	{
479 		struct segment_command seg_command = { };
480 
481 		if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
482 			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
483 			    __func__, seg_start, seg_end, context, seg_start, seg_end);
484 			return KERN_INVALID_ARGUMENT;
485 		}
486 
487 		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
488 			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
489 			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
490 			return KERN_NO_SPACE;
491 		}
492 
493 		seg_command.cmd = LC_SEGMENT;
494 		seg_command.cmdsize = sizeof(seg_command);
495 		coredump_set_segment_name(&seg_command.segname[0], seg_start);
496 		seg_command.vmaddr = (uint32_t) seg_start;
497 		seg_command.vmsize = (uint32_t) size;
498 		seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
499 		seg_command.filesize = (uint32_t) size;
500 		seg_command.maxprot = VM_PROT_READ;
501 		seg_command.initprot = VM_PROT_READ;
502 
503 		/* Flush new command to output */
504 		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
505 		if (ret != KERN_SUCCESS) {
506 			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned  error 0x%x\n",
507 			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
508 			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
509 			return ret;
510 		}
511 
512 		core_context->core_cur_hoffset += sizeof(seg_command);
513 	}
514 
515 	/* Update coredump context */
516 	core_context->core_segments_remaining--;
517 	core_context->core_cur_foffset += size;
518 
519 	return KERN_SUCCESS;
520 }
521 
522 /*
523  * Construct a LC_NOTE command for the specified note
524  */
525 static int
coredump_save_note_description(const char * data_owner,uint64_t length,void * context)526 coredump_save_note_description(const char * data_owner, uint64_t length, void *context)
527 {
528 	processor_core_context *core_context = (processor_core_context *)context;
529 	int ret;
530 
531 	if (data_owner == NULL || (strlen(data_owner) == 0)) {
532 		kern_coredump_log(context, "%s() called with invalid data_owner\n", __func__);
533 		return KERN_INVALID_ARGUMENT;
534 	}
535 
536 	if (core_context->core_notes_remaining == 0) {
537 		kern_coredump_log(context, "%s() called too many times, %llu note descriptions already recorded\n",
538 		    __func__, core_context->core_note_count);
539 		return KERN_INVALID_ARGUMENT;
540 	}
541 
542 	struct note_command note = { .cmd = LC_NOTE,
543 		                     .cmdsize = sizeof(struct note_command),
544 		                     .offset = core_context->core_cur_foffset,
545 		                     .size = length, };
546 	strlcpy((char *) &note.data_owner, data_owner, sizeof(note.data_owner));
547 
548 	/* Flush new command to output */
549 	ret = kdp_core_output(core_context->core_outvars, sizeof(note), (caddr_t)&note);
550 	if (ret != KERN_SUCCESS) {
551 		kern_coredump_log(context, "%s() : failed to write note %llu of %llu : kdp_core_output() returned  error 0x%x\n",
552 		    __func__, core_context->core_note_count - core_context->core_notes_remaining,
553 		    core_context->core_note_count, ret);
554 		return ret;
555 	}
556 
557 	/* Update coredump context */
558 	core_context->core_cur_foffset += length;
559 	core_context->core_cur_hoffset += sizeof(note);
560 	core_context->core_notes_remaining--;
561 
562 	return KERN_SUCCESS;
563 }
564 
565 /*
566  * Save thread state.
567  *
568  * Passed thread_state is expected to be a struct thread_command
569  */
570 static int
coredump_save_thread_state(void * thread_state,void * context)571 coredump_save_thread_state(void *thread_state, void *context)
572 {
573 	processor_core_context *core_context = (processor_core_context *)context;
574 	struct thread_command *tc = (struct thread_command *)thread_state;
575 	int ret;
576 
577 	if (tc->cmd != LC_THREAD) {
578 		kern_coredump_log(context, "%s() : found %d expected LC_THREAD (%d)\n", __func__, tc->cmd, LC_THREAD);
579 		return KERN_INVALID_ARGUMENT;
580 	}
581 
582 	if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
583 		kern_coredump_log(context, "%s() : ran out of space to save threads with %llu of %llu remaining\n", __func__,
584 		    core_context->core_threads_remaining, core_context->core_thread_count);
585 		return KERN_NO_SPACE;
586 	}
587 
588 	ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
589 	if (ret != KERN_SUCCESS) {
590 		kern_coredump_log(context, "%s() : failed to write thread data : kdp_core_output() returned 0x%x\n", __func__, ret);
591 		return ret;
592 	}
593 
594 	core_context->core_threads_remaining--;
595 	core_context->core_cur_hoffset += core_context->core_thread_state_size;
596 
597 	return KERN_SUCCESS;
598 }
599 
600 static int
coredump_save_segment_data(void * seg_data,uint64_t length,void * context)601 coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
602 {
603 	int ret;
604 	processor_core_context *core_context = (processor_core_context *)context;
605 
606 	if (length > core_context->core_segment_bytes_remaining) {
607 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
608 		    seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
609 		    core_context->core_segment_bytes_remaining);
610 		return KERN_INVALID_ARGUMENT;
611 	}
612 
613 	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
614 	if (ret != KERN_SUCCESS) {
615 		kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
616 		    core_context->core_segment_bytes_remaining, ret);
617 		return ret;
618 	}
619 
620 	core_context->core_segment_bytes_remaining -= length;
621 	core_context->core_cur_foffset += length;
622 
623 	return KERN_SUCCESS;
624 }
625 
626 static int
coredump_save_note_data(void * note_data,uint64_t length,void * context)627 coredump_save_note_data(void *note_data, uint64_t length, void *context)
628 {
629 	int ret;
630 	processor_core_context *core_context = (processor_core_context *)context;
631 
632 	if (length > core_context->core_note_bytes_remaining) {
633 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
634 		    note_data, length, context, core_context->core_note_bytes_total - core_context->core_note_bytes_remaining,
635 		    core_context->core_note_bytes_remaining);
636 		return KERN_INVALID_ARGUMENT;
637 	}
638 
639 	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)note_data);
640 	if (ret != KERN_SUCCESS) {
641 		kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
642 		    core_context->core_note_bytes_remaining, ret);
643 		return ret;
644 	}
645 
646 	core_context->core_note_bytes_remaining -= length;
647 	core_context->core_cur_foffset += length;
648 
649 	return KERN_SUCCESS;
650 }
651 
/*
 * Write the legacy "kern ver str" LC_NOTE payload: a 4-byte version word
 * followed by the version string, zero padded out to the fixed
 * KERN_COREDUMP_VERSIONSTRINGMAXSIZE reserved for it in the note.
 */
static int
coredump_save_sw_vers_legacy(void *sw_vers, uint64_t length, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	int ret;

	if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
		kern_coredump_log(context, "%s(%p, %llu, %p) : called with invalid length %llu\n", __func__,
		    sw_vers, length, context, length);
		return KERN_INVALID_ARGUMENT;
	}

	/* Leading version word of the legacy_bin_spec payload */
	uint32_t version = LEGACY_BIN_SPEC_VERSION;
	ret = coredump_save_note_data(&version, sizeof(version), context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s() : failed to write legacy bin spec version : coredump_save_note_data() returned 0x%x\n",
		    __func__, ret);
		return ret;
	}

	ret = coredump_save_note_data(sw_vers, length, context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s() : failed to write sw_vers string : coredump_save_note_data() returned 0x%x\n",
		    __func__, ret);
		return ret;
	}

	if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
		/* Zero fill to the full size */
		uint64_t length_to_zero = (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length);
		ret = kdp_core_output(core_context->core_outvars, length_to_zero, NULL);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s() : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
			    __func__, core_context->core_outvars, length_to_zero, ret);
			return ret;
		}

		/* The zero fill bypassed coredump_save_note_data, so account for it here */
		core_context->core_note_bytes_remaining -= length_to_zero;
		core_context->core_cur_foffset += length_to_zero;
	}

	return KERN_SUCCESS;
}
695 
696 static int
coredump_save_sw_vers(uint64_t address,uuid_t uuid,uint32_t log2_pagesize,void * context)697 coredump_save_sw_vers(uint64_t address, uuid_t uuid, uint32_t log2_pagesize, void *context)
698 {
699 	processor_core_context *core_context = (processor_core_context *)context;
700 	int ret;
701 
702 	uint32_t type = core_context->core_isxnu ? MAIN_BIN_SPEC_TYPE_KERNEL : MAIN_BIN_SPEC_TYPE_STANDALONE;
703 	main_bin_spec spec = { .version = MAIN_BIN_SPEC_VERSION,
704 		               .type = type,
705 		               .address = address,
706 		               .log2_pagesize = log2_pagesize, };
707 	uuid_copy(*((uuid_t *)&spec.uuid), uuid);
708 
709 	ret = coredump_save_note_data(&spec, sizeof(spec), context);
710 	if (ret != KERN_SUCCESS) {
711 		kern_coredump_log(context, "%s() : failed to write main bin spec structure : coredump_save_note_data() returned 0x%x\n", __func__, ret);
712 		return ret;
713 	}
714 
715 	return KERN_SUCCESS;
716 }
717 
/*
 * Write one complete Mach-O corefile (for the kernel itself or for a
 * registered coredump helper) at the current position in the output stream.
 * The sequence is strictly ordered: header metadata, segment descriptions,
 * LC_NOTE descriptions, thread state, page-boundary padding, segment data,
 * then LC_NOTE data, and finally a flush.
 *
 * core_outvars         - output state passed through to kdp_core_output()
 * current_core         - descriptor/callbacks for the core being dumped
 * core_begin_offset    - offset of this core in the raw file; non-zero means
 *                        we are writing to disk and must update the file header
 * core_file_length     - out: compressed length of the core that was written
 * header_update_failed - out: TRUE if the raw-file header update failed, in
 *                        which case the overall file may be inconsistent
 * is_xnu               - TRUE when dumping the kernel itself (selects the
 *                        "main bin spec" type in coredump_save_sw_vers())
 */
static kern_return_t
kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *header_update_failed, boolean_t is_xnu)
{
	kern_return_t ret;
	processor_core_context context = { };
	*core_file_length = 0;
	*header_update_failed = FALSE;

	/* Setup the coredump context */
	context.core_outvars = core_outvars;
	context.core_config = &current_core->kcc_cb;
	context.core_refcon = current_core->kcc_refcon;
	context.core_is64bit = current_core->kcc_is64bit;
	context.core_mh_magic = current_core->kcc_mh_magic;
	context.core_cpu_type = current_core->kcc_cpu_type;
	context.core_cpu_subtype = current_core->kcc_cpu_subtype;
	context.core_isxnu = is_xnu;

	kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);

	/* Optional per-helper init; KERN_NODE_DOWN means "skip this core" (not an error). */
	if (current_core->kcc_cb.kcc_coredump_init != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
		if (ret == KERN_NODE_DOWN) {
			kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
			return KERN_SUCCESS;
		} else if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : coredump_init failed with %d\n", __func__, ret);
			return ret;
		}
	}

	/* Retrieve information about LC_NOTE data we will write out as part of the core before we populate the general header */
	if (current_core->kcc_cb.kcc_coredump_save_note_summary != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_summary(context.core_refcon, coredump_save_note_summary, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : save_note_note_summary failed with %d\n", __func__, ret);
			return ret;
		}
	}

	/* Populate the context with metadata about the corefile (cmd info, sizes etc) */
	ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : get_summary failed with %d\n", __func__, ret);
		return ret;
	}

	/* The get_summary callback may have asked to skip this core entirely. */
	if (context.core_should_be_skipped) {
		kern_coredump_log(&context, "Skipping coredump\n");
		return KERN_SUCCESS;
	}

	if (context.core_header_size == 0) {
		kern_coredump_log(&context, "(%s) : header size not populated after coredump_get_summary\n", __func__);
		return KERN_FAILURE;
	}

	/* Save the segment descriptions for the segments to be included */
	ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
	    &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : save_segment_descriptions failed with %d\n", __func__, ret);
		return ret;
	}

	/* The callback must describe exactly as many segments as the summary promised. */
	if (context.core_segments_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_segments_remaining, context.core_segment_count);
		return KERN_FAILURE;
	}

	/* write out the LC_NOTE with the binary info */
	if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
		ret = coredump_save_note_description(DATA_OWNER_MAIN_BIN_SPEC, sizeof(main_bin_spec), &context);
	} else {
		ret = coredump_save_note_description(DATA_OWNER_LEGACY_BIN_SPEC, sizeof(legacy_bin_spec), &context);
	}
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : coredump_save_note_description returned %d while writing binary info LC_NOTE description", __func__, ret);
		return ret;
	}

	/* Save LC_NOTE desciptions for any additional notes to be included */
	if (current_core->kcc_cb.kcc_coredump_save_note_descriptions != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_descriptions(context.core_refcon, coredump_save_note_description, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_descriptions failed with %d\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_notes_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_note_descriptions returned without all note descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_notes_remaining, context.core_note_count);
		return KERN_FAILURE;
	}

	/*
	 * Save the thread commands/state
	 *
	 * TODO: Should this buffer be allocated at boot rather than on the stack?
	 */
	if (context.core_thread_state_size) {
		/* VLA sized by the per-thread state reported in the summary. */
		char threadstatebuf[context.core_thread_state_size];
		ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
		    &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : save_thread_state failed with %d\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_threads_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_threads_remaining, context.core_thread_count);
		return KERN_FAILURE;
	}
	/* At this point the header must have been fully consumed. */
	assert(context.core_cur_hoffset == context.core_header_size);

	/* Zero fill between the end of the header and the beginning of the segment data file offset */
	ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
		    context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
		return ret;
	}

	/* Reset our local current file offset before we start writing out segment data */
	context.core_cur_foffset = round_page(context.core_header_size);

	ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
		return ret;
	}

	if (context.core_segment_bytes_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
		    context.core_segment_bytes_remaining, context.core_segment_byte_total);
		return KERN_FAILURE;
	}

	/* Save out the LC_NOTE segment data, starting with the binary info / sw vers one */
	if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_sw_vers_detail(context.core_refcon, coredump_save_sw_vers, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers_detail_cb failed with 0x%x\n", __func__, ret);
			return ret;
		}
	} else {
		/* Fall back to the deprecated sw_vers callback for older helpers. */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers_legacy, &context);
#pragma clang diagnostic pop
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers failed with 0x%x\n", __func__, ret);
			return ret;
		}
	}

	if (current_core->kcc_cb.kcc_coredump_save_note_data != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_data(context.core_refcon, coredump_save_note_data, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data failed with 0x%x\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_note_bytes_remaining != 0) {
		kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data returned without all note data written, %llu of %llu remaining\n",
		    __func__, context.core_note_bytes_remaining, context.core_note_bytes_total);
		return KERN_FAILURE;
	}


	/* Flush the last data out */
	ret = kdp_core_output(context.core_outvars, 0, NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
		    context.core_outvars, ret);
		return ret;
	}

	kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.",
	    current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
	    (context.core_thread_count * context.core_thread_state_size), context.core_file_length);

	if (core_begin_offset) {
		/* If we're writing to disk (we have a begin offset), we need to update the header */
		ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed);
		if (ret != KERN_SUCCESS) {
			*header_update_failed = TRUE;
			kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
			return ret;
		}
	}

	kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);

	*core_file_length = context.core_file_length_compressed;

	return KERN_SUCCESS;
}
921 
922 kern_return_t
kern_do_coredump(void * core_outvars,boolean_t kernel_only,uint64_t first_file_offset,uint64_t * last_file_offset)923 kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_offset, uint64_t *last_file_offset)
924 {
925 	struct kern_coredump_core *current_core = NULL;
926 	uint64_t prev_core_length = 0;
927 	kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
928 	boolean_t header_update_failed = FALSE;
929 
930 	assert(last_file_offset != NULL);
931 
932 	*last_file_offset = first_file_offset;
933 	cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &header_update_failed, TRUE);
934 	if (cur_ret != KERN_SUCCESS) {
935 		// As long as we didn't fail while updating the header for the raw file, we should be able to try
936 		// to capture other corefiles.
937 		if (header_update_failed) {
938 			// The header may be in an inconsistent state, so bail now
939 			return KERN_FAILURE;
940 		} else {
941 			prev_core_length = 0;
942 			ret = KERN_FAILURE;
943 		}
944 	}
945 
946 	*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
947 	prev_core_length = 0;
948 
949 	if (kernel_only) {
950 		return ret;
951 	}
952 
953 	current_core = kern_coredump_core_list;
954 	while (current_core) {
955 		/* Seek to the beginning of the next file */
956 		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
957 		if (cur_ret != KERN_SUCCESS) {
958 			kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
959 			return KERN_FAILURE;
960 		}
961 
962 		cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed, FALSE);
963 		if (cur_ret != KERN_SUCCESS) {
964 			// As long as we didn't fail while updating the header for the raw file, we should be able to try
965 			// to capture other corefiles.
966 			if (header_update_failed) {
967 				// The header may be in an inconsistent state, so bail now
968 				return KERN_FAILURE;
969 			} else {
970 				// Try to capture other corefiles even if one failed, update the overall return
971 				// status though
972 				prev_core_length = 0;
973 				ret = KERN_FAILURE;
974 			}
975 		}
976 
977 		/* Calculate the offset of the beginning of the next core in the raw file */
978 		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
979 		prev_core_length = 0;
980 		current_core = current_core->kcc_next;
981 	}
982 
983 	return ret;
984 }
985 #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
986 
/*
 * Stub for configurations built without CONFIG_KDP_INTERACTIVE_DEBUGGING:
 * coredump helpers cannot be registered, so all arguments are ignored.
 */
kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks, void* refcon,
    const char *core_description, boolean_t is64bit, uint32_t mh_magic,
    cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
{
#pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
	return KERN_NOT_SUPPORTED;
}
995 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
996 
997 /*
998  * Must be callable with a NULL context
999  */
1000 void
kern_coredump_log(void * context,const char * string,...)1001 kern_coredump_log(void *context, const char *string, ...)
1002 {
1003 #pragma unused(context)
1004 	va_list coredump_log_args;
1005 
1006 	va_start(coredump_log_args, string);
1007 	_doprnt(string, &coredump_log_args, consdebug_putc, 16);
1008 	va_end(coredump_log_args);
1009 
1010 #if defined(__arm__) || defined(__arm64__)
1011 	paniclog_flush();
1012 #endif
1013 }
1014