xref: /xnu-10063.101.15/osfmk/kdp/processor_core.c (revision 94d3b452840153a99b38a3a9659680b2a006908e)
1 /*
2  * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kdp/kdp_core.h>
30 #include <kdp/processor_core.h>
31 #include <kdp/core_notes.h>
32 #include <kern/assert.h>
33 #include <kern/monotonic.h>
34 #include <kern/zalloc.h>
35 #include <libkern/kernel_mach_header.h>
36 #include <libkern/OSAtomic.h>
37 #include <libsa/types.h>
38 #include <pexpert/pexpert.h>
39 #include <vm/vm_map.h>
40 
41 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
42 
/*
 * Round x up to the next multiple of y (y must be non-zero).
 * NOTE: both arguments are evaluated multiple times — avoid side effects.
 */
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
	                (x) : ((x) + ((y) - ((x) % (y)))))
45 
/* LC_NOTE data-owner string for the legacy version-string note. */
#define DATA_OWNER_LEGACY_BIN_SPEC "kern ver str"
/*
 * Format of the legacy bin spec (LC_IDENT-like) LC_NOTE payload as expected by LLDB
 */
typedef struct {
	uint32_t version; // currently 1 (LEGACY_BIN_SPEC_VERSION)
	char version_string[KERN_COREDUMP_VERSIONSTRINGMAXSIZE];
} __attribute__((packed)) legacy_bin_spec;
#define LEGACY_BIN_SPEC_VERSION 1
55 
/*
 * Flavor of corefile being produced; selects the registration slot/list a
 * helper is linked into and indexes bin_spec_map below.
 */
__enum_closed_decl(kern_coredump_type_t, uint8_t, {
	XNU_COREDUMP,
	USERSPACE_COREDUMP,
	COPROCESSOR_COREDUMP,
	SECURE_COREDUMP,
	NUM_COREDUMP_TYPES,
});
63 
/*
 * Maps each coredump type to the MAIN_BIN_SPEC_TYPE_* value advertised in
 * its "main bin spec" LC_NOTE (presumably consumed later in this file —
 * no user visible in this chunk).
 */
static uint32_t bin_spec_map[NUM_COREDUMP_TYPES] = {
	[XNU_COREDUMP] = MAIN_BIN_SPEC_TYPE_KERNEL,
	[USERSPACE_COREDUMP] = MAIN_BIN_SPEC_TYPE_USER,
	[COPROCESSOR_COREDUMP] = MAIN_BIN_SPEC_TYPE_STANDALONE,
	[SECURE_COREDUMP] = MAIN_BIN_SPEC_TYPE_STANDALONE
};
70 
/*
 * The processor_core_context structure describes the current
 * corefile that's being generated. It also includes a pointer
 * to the core_outvars which is used by the KDP code for context
 * about the specific output mechanism being used.
 *
 * We include *remaining variables to catch inconsistencies / bugs
 * in the co-processor coredump callbacks: each save callback decrements
 * its counter and the callers can verify everything promised in the
 * summary was actually delivered.
 */
typedef struct {
	struct kdp_core_out_vars * core_outvars;     /* Output procedure info (see kdp_out_stage.h) */
	kern_coredump_callback_config *core_config;  /* Information about core currently being dumped */
	void *core_refcon;                           /* Reference constant associated with the coredump helper */
	boolean_t core_should_be_skipped;            /* Indicates whether this specific core should not be dumped */
	boolean_t core_is64bit;                      /* Bitness of CPU */
	kern_coredump_type_t core_type;              /* Indicates type of this core */
	uint32_t core_mh_magic;                      /* Magic for mach header */
	cpu_type_t core_cpu_type;                    /* CPU type for mach header */
	cpu_subtype_t core_cpu_subtype;              /* CPU subtype for mach header */
	uint64_t core_file_length;                   /* Overall corefile length including any zero padding */
	uint64_t core_file_length_compressed;        /* File length after compression */
	uint64_t core_segment_count;                 /* Number of LC_SEGMENTs in the core currently being dumped */
	uint64_t core_segments_remaining;            /* Number of LC_SEGMENTs that have not been added to the header */
	uint64_t core_segment_byte_total;            /* Sum of all the data from the LC_SEGMENTS in the core */
	uint64_t core_segment_bytes_remaining;       /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
	uint64_t core_thread_count;                  /* Number of LC_THREADs to be included */
	uint64_t core_threads_remaining;             /* Number of LC_THREADs that have yet to be included */
	uint64_t core_thread_state_size;             /* Size of each LC_THREAD */
	uint64_t core_note_count;                    /* Number of LC_NOTEs to be included */
	uint64_t core_notes_remaining;               /* Number of LC_NOTEs that have not been added to the header */
	uint64_t core_note_bytes_total;              /* Sum of all data from the LC_NOTE segments in the core */
	uint64_t core_note_bytes_remaining;          /* Quantity of data remaining from LC_NOTEs that have yet to be added */
	uint64_t core_cur_hoffset;                   /* Current offset in this core's header */
	uint64_t core_cur_foffset;                   /* Current offset in this core's overall file */
	uint64_t core_header_size;                   /* Size of this core's header */
	uint64_t core_total_bytes;                   /* Total amount of data to be included in this core (excluding zero fill) */
} processor_core_context;
108 
/*
 * The kern_coredump_core structure describes a core that has been
 * registered for use by the coredump mechanism.
 */
struct kern_coredump_core {
	struct kern_coredump_core *kcc_next;             /* Next processor to dump */
	void *kcc_refcon;                                /* Reference constant to be passed to callbacks */
	char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor */
	boolean_t kcc_is64bit;                           /* Processor bitness */
	uint32_t kcc_mh_magic;                           /* Magic for mach header */
	cpu_type_t kcc_cpu_type;                         /* CPU type for mach header */
	cpu_subtype_t kcc_cpu_subtype;                   /* CPU subtype for mach header */
	kern_coredump_callback_config kcc_cb;            /* Registered processor callbacks for coredump */
};

/* Coprocessor helper list: pushed onto lock-free via OSCompareAndSwapPtr. */
struct kern_coredump_core * kern_coredump_core_list = NULL;
/* Userspace helper list: all traversal/mutation under the mutex below. */
struct kern_coredump_core * kern_userspace_coredump_core_list = NULL;
LCK_GRP_DECLARE(kern_userspace_coredump_core_list_lock_grp, "userspace coredump list");
LCK_MTX_DECLARE(kern_userspace_coredump_core_list_lock, &kern_userspace_coredump_core_list_lock_grp);
128 
/* Signature of the deprecated (legacy) software-version save callback. */
typedef kern_return_t (*legacy_sw_vers_registered_cb)(void *refcon, core_save_sw_vers_cb callback, void *context);

/* Total helpers registered (all types); updated with OSAddAtomic. */
uint32_t coredump_registered_count = 0;

/* Singleton helpers for the xnu kernel and the secure kernel respectively. */
struct kern_coredump_core *kernel_helper = NULL;
struct kern_coredump_core *sk_helper = NULL;
135 
136 static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,kern_coredump_type_t type,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)137 kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
138     void *refcon, const char *core_description, kern_coredump_type_t type, boolean_t is64bit,
139     uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
140 {
141 	struct kern_coredump_core *core_helper = NULL;
142 	kern_coredump_callback_config *core_callbacks = NULL;
143 
144 	if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) {
145 		return NULL;
146 	}
147 	if (kc_callbacks == NULL) {
148 		return NULL;
149 	}
150 	;
151 	if (core_description == NULL) {
152 		return NULL;
153 	}
154 
155 	if (kc_callbacks->kcc_coredump_get_summary == NULL ||
156 	    kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
157 	    kc_callbacks->kcc_coredump_save_segment_data == NULL ||
158 	    kc_callbacks->kcc_coredump_save_thread_state == NULL) {
159 		return NULL;
160 	}
161 
162 #pragma clang diagnostic push
163 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
164 	legacy_sw_vers_registered_cb legacy_vers_callback = kc_callbacks->kcc_coredump_save_sw_vers;
165 #pragma clang diagnostic pop
166 
167 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
168 		if (legacy_vers_callback == NULL &&
169 		    kc_callbacks->kcc_coredump_save_sw_vers_detail == NULL) {
170 			return NULL;
171 		}
172 	} else {
173 		if (legacy_vers_callback == NULL) {
174 			return NULL;
175 		}
176 	}
177 
178 
179 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
180 		/* Either all note related callbacks should be set or none should be set */
181 		if ((kc_callbacks->kcc_coredump_save_note_summary == NULL) != (kc_callbacks->kcc_coredump_save_note_descriptions == NULL)) {
182 			return NULL;
183 		}
184 		if ((kc_callbacks->kcc_coredump_save_note_descriptions == NULL) != (kc_callbacks->kcc_coredump_save_note_data == NULL)) {
185 			return NULL;
186 		}
187 	}
188 
189 
190 #if !defined(__LP64__)
191 	/* We don't support generating 64-bit cores on 32-bit platforms */
192 	if (is64bit) {
193 		return NULL;
194 	}
195 #endif
196 
197 	core_helper = zalloc_permanent_type(struct kern_coredump_core);
198 	core_helper->kcc_next = NULL;
199 	core_helper->kcc_refcon = refcon;
200 	if (type == XNU_COREDUMP || type == USERSPACE_COREDUMP || type == SECURE_COREDUMP) {
201 		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
202 	} else {
203 		assert(type == COPROCESSOR_COREDUMP);
204 		/* Make sure there's room for the -cp suffix (16 - NULL char - strlen(-cp)) */
205 		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.12s-cp", core_description);
206 	}
207 	core_helper->kcc_is64bit = is64bit;
208 	core_helper->kcc_mh_magic = mh_magic;
209 	core_helper->kcc_cpu_type = cpu_type;
210 	core_helper->kcc_cpu_subtype = cpu_subtype;
211 	core_callbacks = &core_helper->kcc_cb;
212 
213 	core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
214 	core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
215 	core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
216 	core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
217 	core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
218 #pragma clang diagnostic push
219 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
220 	core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;
221 #pragma clang diagnostic pop
222 
223 
224 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
225 		core_callbacks->kcc_coredump_save_note_summary = kc_callbacks->kcc_coredump_save_note_summary;
226 		core_callbacks->kcc_coredump_save_note_descriptions = kc_callbacks->kcc_coredump_save_note_descriptions;
227 		core_callbacks->kcc_coredump_save_note_data = kc_callbacks->kcc_coredump_save_note_data;
228 		core_callbacks->kcc_coredump_save_sw_vers_detail = kc_callbacks->kcc_coredump_save_sw_vers_detail;
229 	}
230 
231 	if (type == XNU_COREDUMP) {
232 		assert(kernel_helper == NULL);
233 		kernel_helper = core_helper;
234 	} else if (type == SECURE_COREDUMP) {
235 		assert(sk_helper == NULL);
236 		sk_helper = core_helper;
237 	} else if (type == USERSPACE_COREDUMP) {
238 		lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
239 		core_helper->kcc_next = kern_userspace_coredump_core_list;
240 		kern_userspace_coredump_core_list = core_helper;
241 		lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
242 	} else {
243 		assert(type == COPROCESSOR_COREDUMP);
244 		do {
245 			core_helper->kcc_next = kern_coredump_core_list;
246 		} while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
247 	}
248 
249 	OSAddAtomic(1, &coredump_registered_count);
250 	kprintf("Registered coredump handler for %s\n", core_description);
251 
252 	return core_helper;
253 }
254 
255 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)256 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
257     void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
258     cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
259 {
260 	if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) {
261 		return KERN_RESOURCE_SHORTAGE;
262 	}
263 
264 	if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, COPROCESSOR_COREDUMP,
265 	    is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) {
266 		return KERN_INVALID_ARGUMENT;
267 	}
268 
269 	return KERN_SUCCESS;
270 }
271 
272 kern_return_t
kern_register_xnu_coredump_helper(kern_coredump_callback_config * kc_callbacks)273 kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
274 {
275 #if defined(__LP64__)
276 	boolean_t is64bit = TRUE;
277 #else
278 	boolean_t is64bit = FALSE;
279 #endif
280 
281 	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", XNU_COREDUMP, is64bit,
282 	    _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
283 		return KERN_FAILURE;
284 	}
285 
286 	return KERN_SUCCESS;
287 }
288 
289 kern_return_t
kern_register_sk_coredump_helper(kern_coredump_callback_config * sk_callbacks,void * refcon)290 kern_register_sk_coredump_helper(kern_coredump_callback_config *sk_callbacks, void *refcon)
291 {
292 	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, sk_callbacks,
293 	    refcon, "secure-kernel", SECURE_COREDUMP, TRUE, _mh_execute_header.magic,
294 	    _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
295 		return KERN_FAILURE;
296 	}
297 
298 	return KERN_SUCCESS;
299 }
300 
/* BSD-layer accessors used to fill the userspace core's mach header. */
extern cpu_type_t
process_cpu_type(void * bsd_info);

/*
 * NOTE(review): declared to return cpu_type_t rather than cpu_subtype_t —
 * presumably the two are layout-compatible integers; confirm against the
 * definition in the BSD layer before changing.
 */
extern cpu_type_t
process_cpu_subtype(void * bsd_info);

extern char     *proc_name_address(void *p);
308 
309 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)310 kern_register_userspace_coredump(task_t task, const char * name)
311 {
312 	kern_return_t result;
313 	struct kern_userspace_coredump_context * context = NULL;
314 	boolean_t is64bit;
315 	uint32_t mh_magic;
316 	uint32_t mh_cputype;
317 	uint32_t mh_cpusubtype;
318 	kern_coredump_callback_config userkc_callbacks;
319 
320 	is64bit = task_has_64Bit_addr(task);
321 	mh_magic = is64bit ? MH_MAGIC_64 : MH_MAGIC;
322 	mh_cputype = process_cpu_type(get_bsdtask_info(task));
323 	mh_cpusubtype = process_cpu_subtype(get_bsdtask_info(task));
324 
325 
326 	context = kalloc_type(struct kern_userspace_coredump_context, (zalloc_flags_t)(Z_WAITOK | Z_ZERO));
327 	context->task = task;
328 
329 	userkc_callbacks.kcc_coredump_init = user_dump_init;
330 	userkc_callbacks.kcc_coredump_get_summary = user_dump_save_summary;
331 	userkc_callbacks.kcc_coredump_save_segment_descriptions = user_dump_save_seg_descriptions;
332 	userkc_callbacks.kcc_coredump_save_thread_state = user_dump_save_thread_state;
333 	userkc_callbacks.kcc_coredump_save_sw_vers_detail = user_dump_save_sw_vers_detail;
334 	userkc_callbacks.kcc_coredump_save_segment_data = user_dump_save_segment_data;
335 	userkc_callbacks.kcc_coredump_save_note_summary = user_dump_save_note_summary;
336 	userkc_callbacks.kcc_coredump_save_note_descriptions = user_dump_save_note_descriptions;
337 	userkc_callbacks.kcc_coredump_save_note_data = user_dump_save_note_data;
338 
339 	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, &userkc_callbacks, context, name, USERSPACE_COREDUMP, is64bit,
340 	    mh_magic, mh_cputype, mh_cpusubtype) == NULL) {
341 		result = KERN_FAILURE;
342 		goto finish;
343 	}
344 
345 	result = KERN_SUCCESS;
346 
347 finish:
348 	if (result != KERN_SUCCESS && context != NULL) {
349 		kfree_type(struct kern_userspace_coredump_context, context);
350 	}
351 
352 	return result;
353 }
354 
355 kern_return_t
kern_unregister_userspace_coredump(task_t task)356 kern_unregister_userspace_coredump(task_t task)
357 {
358 	struct kern_coredump_core * current_core = NULL;
359 	struct kern_coredump_core * previous_core = NULL;
360 
361 	lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
362 	current_core = kern_userspace_coredump_core_list;
363 	while (current_core) {
364 		struct kern_userspace_coredump_context * context = (struct kern_userspace_coredump_context *)current_core->kcc_refcon;
365 		assert(context != NULL);
366 		if (context->task == task) {
367 			/* remove current_core from the list */
368 			if (previous_core == NULL) {
369 				kern_userspace_coredump_core_list = current_core->kcc_next;
370 			} else {
371 				previous_core->kcc_next = current_core->kcc_next;
372 			}
373 			break;
374 		}
375 		previous_core = current_core;
376 		current_core = current_core->kcc_next;
377 	}
378 	lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
379 
380 	if (current_core) {
381 		kfree_type(struct kern_userspace_coredump_context, current_core->kcc_refcon);
382 		OSAddAtomic(-1, &coredump_registered_count);
383 		return KERN_SUCCESS;
384 	}
385 
386 	return KERN_NOT_FOUND;
387 }
388 
389 /*
390  * Save LC_NOTE metadata about the core we are going to write before we write the mach header
391  */
392 static int
coredump_save_note_summary(uint64_t core_note_count,uint64_t core_note_byte_count,void * context)393 coredump_save_note_summary(uint64_t core_note_count, uint64_t core_note_byte_count, void *context)
394 {
395 	processor_core_context *core_context = (processor_core_context *)context;
396 
397 	if (!core_note_count || !core_note_byte_count || !context) {
398 		return KERN_INVALID_ARGUMENT;
399 	}
400 
401 	core_context->core_note_count = core_context->core_notes_remaining = core_note_count;
402 	core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = core_note_byte_count;
403 
404 	return KERN_SUCCESS;
405 }
406 
407 /*
408  * Save metadata about the core we're about to write, write out the mach header
409  */
/*
 * Save metadata about the core we're about to write, write out the mach header
 *
 * Seeds all the counters in the processor_core_context from the helper's
 * summary, accounts for the implicit version LC_NOTE, computes the header
 * size and total/padded file length, resets the output stage, and emits
 * the (32- or 64-bit) mach header.  May set core_should_be_skipped instead
 * of writing if the output stage asks to skip this core.
 */
static int
coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
    uint64_t thread_count, uint64_t thread_state_size,
    __unused uint64_t misc_bytes_count, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	uint32_t sizeofcmds = 0, numcmds = 0;
	bool should_skip = false;
	int ret = 0;

	if (!core_segment_count || !core_byte_count
	    || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* secure coredumps may not contain any thread state. */
	if (core_context->core_type != SECURE_COREDUMP && (!thread_count || !thread_state_size)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Initialize core_context */
	core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
	core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
	core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
	core_context->core_thread_state_size = thread_state_size;

	/* Account for the LC_NOTE needed to store version/load information */
	core_context->core_note_count = core_context->core_notes_remaining = (core_context->core_note_count + 1);
	size_t vers_note_length = sizeof(main_bin_spec_note_t);
	if (core_context->core_config->kcc_coredump_save_sw_vers_detail == NULL) {
		/* Helper only has the legacy version callback -> legacy payload format. */
		vers_note_length = sizeof(legacy_bin_spec);
	}
	core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = (core_context->core_note_bytes_total + vers_note_length);

#if defined(__LP64__)
	if (core_context->core_is64bit) {
		/* sizeofcmds = all LC_SEGMENT_64s + all LC_THREADs + all LC_NOTEs */
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    (core_context->core_note_count * sizeof(struct note_command)));
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
	} else
#endif /* defined(__LP64__) */
	{
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    (core_context->core_note_count * sizeof(struct note_command)));
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
	}

	/* File layout: page-rounded header, then segment data, then note data. */
	core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
	core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
	core_context->core_cur_foffset = round_page(core_context->core_header_size);

	numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + core_context->core_note_count);

	/*
	 * Reset the zstream and other output context before writing any data out. We do this here
	 * to update the total file length on the outvars before we start writing out.
	 */
	ret = kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length, true, &should_skip);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s() : failed to reset the out vars : kdp_reset_output_vars(%p, %llu, true, %p) returned error 0x%x\n",
		    __func__, core_context->core_outvars, core_context->core_file_length, &should_skip, ret);
		return ret;
	}

	if (should_skip) {
		/* Output stage declined this core; record it and report success. */
		core_context->core_should_be_skipped = TRUE;
		return KERN_SUCCESS;
	}

	/* Construct core file header */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct mach_header_64 core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret =  kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	} else
#endif /* defined(__LP64__) */
	{
		struct mach_header core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret =  kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	}

	return KERN_SUCCESS;
}
530 
531 /*
532  * Construct a segment command for the specified segment.
533  */
/*
 * Construct a segment command for the specified segment.
 *
 * Emits one LC_SEGMENT(_64) load command covering [seg_start, seg_end)
 * into the header region of the corefile and advances both the header
 * offset and the file offset at which the segment's data will later land.
 */
static int
coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
    void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	int ret;
	uint64_t size = seg_end - seg_start;

	if (seg_end <= seg_start) {
		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
		    __func__, seg_start, seg_end, context, seg_start, seg_end);
		return KERN_INVALID_ARGUMENT;
	}

	/* Helper promised core_segment_count segments in the summary; enforce it. */
	if (core_context->core_segments_remaining == 0) {
		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n",
		    __func__, seg_start, seg_end, context, core_context->core_segment_count);
		return KERN_INVALID_ARGUMENT;
	}

	/* Construct segment command */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct segment_command_64 seg_command = { };

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT_64;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = seg_start;
		seg_command.vmsize = size;
		seg_command.fileoff = core_context->core_cur_foffset;
		seg_command.filesize = size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. kdp_core_output(%p, %lu, %p) returned error %d\n",
			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	} else
#endif /* defined(__LP64__) */
	{
		struct segment_command seg_command = { };

		/* 32-bit commands can only describe 32-bit addresses. */
		if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
			    __func__, seg_start, seg_end, context, seg_start, seg_end);
			return KERN_INVALID_ARGUMENT;
		}

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = (uint32_t) seg_start;
		seg_command.vmsize = (uint32_t) size;
		seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
		seg_command.filesize = (uint32_t) size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned  error 0x%x\n",
			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	}

	/* Update coredump context */
	core_context->core_segments_remaining--;
	core_context->core_cur_foffset += size;

	return KERN_SUCCESS;
}
630 
631 /*
632  * Construct a LC_NOTE command for the specified note
633  */
634 static int
coredump_save_note_description(const char * data_owner,uint64_t length,void * context)635 coredump_save_note_description(const char * data_owner, uint64_t length, void *context)
636 {
637 	processor_core_context *core_context = (processor_core_context *)context;
638 	int ret;
639 
640 	if (data_owner == NULL || (strlen(data_owner) == 0)) {
641 		kern_coredump_log(context, "%s() called with invalid data_owner\n", __func__);
642 		return KERN_INVALID_ARGUMENT;
643 	}
644 
645 	if (core_context->core_notes_remaining == 0) {
646 		kern_coredump_log(context, "%s() called too many times, %llu note descriptions already recorded\n",
647 		    __func__, core_context->core_note_count);
648 		return KERN_INVALID_ARGUMENT;
649 	}
650 
651 	struct note_command note = { .cmd = LC_NOTE,
652 		                     .cmdsize = sizeof(struct note_command),
653 		                     .offset = core_context->core_cur_foffset,
654 		                     .size = length, };
655 	strlcpy((char *) &note.data_owner, data_owner, sizeof(note.data_owner));
656 
657 	/* Flush new command to output */
658 	ret = kdp_core_output(core_context->core_outvars, sizeof(note), (caddr_t)&note);
659 	if (ret != KERN_SUCCESS) {
660 		kern_coredump_log(context, "%s() : failed to write note %llu of %llu : kdp_core_output() returned  error 0x%x\n",
661 		    __func__, core_context->core_note_count - core_context->core_notes_remaining,
662 		    core_context->core_note_count, ret);
663 		return ret;
664 	}
665 
666 	/* Update coredump context */
667 	core_context->core_cur_foffset += length;
668 	core_context->core_cur_hoffset += sizeof(note);
669 	core_context->core_notes_remaining--;
670 
671 	return KERN_SUCCESS;
672 }
673 
674 /*
675  * Save thread state.
676  *
677  * Passed thread_state is expected to be a struct thread_command
678  */
679 static int
coredump_save_thread_state(void * thread_state,void * context)680 coredump_save_thread_state(void *thread_state, void *context)
681 {
682 	processor_core_context *core_context = (processor_core_context *)context;
683 	struct thread_command *tc = (struct thread_command *)thread_state;
684 	int ret;
685 
686 	if (tc->cmd != LC_THREAD) {
687 		kern_coredump_log(context, "%s() : found %d expected LC_THREAD (%d)\n", __func__, tc->cmd, LC_THREAD);
688 		return KERN_INVALID_ARGUMENT;
689 	}
690 
691 	if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
692 		kern_coredump_log(context, "%s() : ran out of space to save threads with %llu of %llu remaining\n", __func__,
693 		    core_context->core_threads_remaining, core_context->core_thread_count);
694 		return KERN_NO_SPACE;
695 	}
696 
697 	ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
698 	if (ret != KERN_SUCCESS) {
699 		kern_coredump_log(context, "%s() : failed to write thread data : kdp_core_output() returned 0x%x\n", __func__, ret);
700 		return ret;
701 	}
702 
703 	core_context->core_threads_remaining--;
704 	core_context->core_cur_hoffset += core_context->core_thread_state_size;
705 
706 	return KERN_SUCCESS;
707 }
708 
709 static int
coredump_save_segment_data(void * seg_data,uint64_t length,void * context)710 coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
711 {
712 	int ret;
713 	processor_core_context *core_context = (processor_core_context *)context;
714 
715 	if (length > core_context->core_segment_bytes_remaining) {
716 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
717 		    seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
718 		    core_context->core_segment_bytes_remaining);
719 		return KERN_INVALID_ARGUMENT;
720 	}
721 
722 	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
723 	if (ret != KERN_SUCCESS) {
724 		kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
725 		    core_context->core_segment_bytes_remaining, ret);
726 		return ret;
727 	}
728 
729 	core_context->core_segment_bytes_remaining -= length;
730 	core_context->core_cur_foffset += length;
731 
732 	return KERN_SUCCESS;
733 }
734 
735 static int
coredump_save_note_data(void * note_data,uint64_t length,void * context)736 coredump_save_note_data(void *note_data, uint64_t length, void *context)
737 {
738 	int ret;
739 	processor_core_context *core_context = (processor_core_context *)context;
740 
741 	if (length > core_context->core_note_bytes_remaining) {
742 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
743 		    note_data, length, context, core_context->core_note_bytes_total - core_context->core_note_bytes_remaining,
744 		    core_context->core_note_bytes_remaining);
745 		return KERN_INVALID_ARGUMENT;
746 	}
747 
748 	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)note_data);
749 	if (ret != KERN_SUCCESS) {
750 		kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
751 		    core_context->core_note_bytes_remaining, ret);
752 		return ret;
753 	}
754 
755 	core_context->core_note_bytes_remaining -= length;
756 	core_context->core_cur_foffset += length;
757 
758 	return KERN_SUCCESS;
759 }
760 
761 static int
coredump_save_sw_vers_legacy(void * sw_vers,uint64_t length,void * context)762 coredump_save_sw_vers_legacy(void *sw_vers, uint64_t length, void *context)
763 {
764 	processor_core_context *core_context = (processor_core_context *)context;
765 	int ret;
766 
767 	if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
768 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with invalid length %llu\n", __func__,
769 		    sw_vers, length, context, length);
770 		return KERN_INVALID_ARGUMENT;
771 	}
772 
773 	uint32_t version = LEGACY_BIN_SPEC_VERSION;
774 	ret = coredump_save_note_data(&version, sizeof(version), context);
775 	if (ret != KERN_SUCCESS) {
776 		kern_coredump_log(context, "%s() : failed to write legacy bin spec version : coredump_save_note_data() returned 0x%x\n",
777 		    __func__, ret);
778 		return ret;
779 	}
780 
781 	ret = coredump_save_note_data(sw_vers, length, context);
782 	if (ret != KERN_SUCCESS) {
783 		kern_coredump_log(context, "%s() : failed to write sw_vers string : coredump_save_note_data() returned 0x%x\n",
784 		    __func__, ret);
785 		return ret;
786 	}
787 
788 	if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
789 		/* Zero fill to the full size */
790 		uint64_t length_to_zero = (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length);
791 		ret = kdp_core_output(core_context->core_outvars, length_to_zero, NULL);
792 		if (ret != KERN_SUCCESS) {
793 			kern_coredump_log(context, "%s() : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
794 			    __func__, core_context->core_outvars, length_to_zero, ret);
795 			return ret;
796 		}
797 
798 		core_context->core_note_bytes_remaining -= length_to_zero;
799 		core_context->core_cur_foffset += length_to_zero;
800 	}
801 
802 	return KERN_SUCCESS;
803 }
804 
805 static int
coredump_save_sw_vers(uint64_t address,uuid_t uuid,uint32_t log2_pagesize,void * context)806 coredump_save_sw_vers(uint64_t address, uuid_t uuid, uint32_t log2_pagesize, void *context)
807 {
808 	processor_core_context *core_context = (processor_core_context *)context;
809 	int ret;
810 
811 	uint32_t type = bin_spec_map[core_context->core_type];
812 	main_bin_spec_note_t spec = {
813 		.version = MAIN_BIN_SPEC_VERSION,
814 		.type = type,
815 		.address = address,
816 		.log2_pagesize = log2_pagesize,
817 	};
818 	uuid_copy(*((uuid_t *)&spec.uuid), uuid);
819 
820 	ret = coredump_save_note_data(&spec, sizeof(spec), context);
821 	if (ret != KERN_SUCCESS) {
822 		kern_coredump_log(context, "%s() : failed to write main bin spec structure : coredump_save_note_data() returned 0x%x\n", __func__, ret);
823 		return ret;
824 	}
825 
826 	return KERN_SUCCESS;
827 }
828 
/*
 * kern_coredump_routine
 *
 * Write one complete corefile (header, segment/note descriptions, thread
 * state, segment data and LC_NOTE payloads) for a single registered core,
 * driven by the callbacks in current_core->kcc_cb and streamed through
 * kdp_core_output().
 *
 * core_outvars         - opaque output state, passed through to kdp_core_output()
 * current_core         - the core to dump (callbacks, CPU identity, name)
 * core_begin_offset    - this core's offset in the raw file; non-zero means we
 *                        are writing to disk and must update the file header
 *                        via kern_dump_record_file() when done
 * core_file_length     - out: compressed length of the written core
 * header_update_failed - out: TRUE only if kern_dump_record_file() failed,
 *                        i.e. the raw-file header may be inconsistent
 * type                 - kind of core (e.g. XNU_COREDUMP, SECURE_COREDUMP, ...)
 * details_flags        - passed through to kern_dump_record_file()
 *
 * Returns KERN_SUCCESS (including when the core asks to be skipped), an error
 * propagated from a callback, or KERN_FAILURE when a callback returned
 * without producing all the data it promised in the summary phase.
 */
static kern_return_t
kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *header_update_failed, kern_coredump_type_t type, uint64_t details_flags)
{
#if CONFIG_CPU_COUNTERS
	uint64_t start_cycles;
	uint64_t end_cycles;
#endif // CONFIG_CPU_COUNTERS
	kern_return_t ret;
	processor_core_context context = { };
	*core_file_length = 0;
	*header_update_failed = FALSE;

#if CONFIG_CPU_COUNTERS
	start_cycles = mt_cur_cpu_cycles();
#endif // CONFIG_CPU_COUNTERS

	/* Setup the coredump context */
	context.core_outvars = core_outvars;
	context.core_config = &current_core->kcc_cb;
	context.core_refcon = current_core->kcc_refcon;
	context.core_is64bit = current_core->kcc_is64bit;
	context.core_mh_magic = current_core->kcc_mh_magic;
	context.core_cpu_type = current_core->kcc_cpu_type;
	context.core_cpu_subtype = current_core->kcc_cpu_subtype;
	context.core_type = type;

	kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);

	/* Give the core a chance to prepare; KERN_NODE_DOWN means "skip me". */
	if (current_core->kcc_cb.kcc_coredump_init != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
		if (ret == KERN_NODE_DOWN) {
			kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
			return KERN_SUCCESS;
		} else if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : coredump_init failed with %d\n", __func__, ret);
			return ret;
		}
	}

	/* Retrieve information about LC_NOTE data we will write out as part of the core before we populate the general header */
	if (current_core->kcc_cb.kcc_coredump_save_note_summary != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_summary(context.core_refcon, coredump_save_note_summary, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : save_note_note_summary failed with %d\n", __func__, ret);
			return ret;
		}
	}

	/* Populate the context with metadata about the corefile (cmd info, sizes etc) */
	ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : get_summary failed with %d\n", __func__, ret);
		return ret;
	}

	/* The summary callback may have asked for this core to be skipped entirely. */
	if (context.core_should_be_skipped) {
		kern_coredump_log(&context, "Skipping coredump\n");
		return KERN_SUCCESS;
	}

	if (context.core_header_size == 0) {
		kern_coredump_log(&context, "(%s) : header size not populated after coredump_get_summary\n", __func__);
		return KERN_FAILURE;
	}

	/* Save the segment descriptions for the segments to be included */
	ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
	    &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : save_segment_descriptions failed with %d\n", __func__, ret);
		return ret;
	}

	/* The callback must emit exactly as many descriptions as it promised. */
	if (context.core_segments_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_segments_remaining, context.core_segment_count);
		return KERN_FAILURE;
	}

	/* write out the LC_NOTE with the binary info */
	if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
		ret = coredump_save_note_description(MAIN_BIN_SPEC_DATA_OWNER, sizeof(main_bin_spec_note_t), &context);
	} else {
		ret = coredump_save_note_description(DATA_OWNER_LEGACY_BIN_SPEC, sizeof(legacy_bin_spec), &context);
	}
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : coredump_save_note_description returned %d while writing binary info LC_NOTE description", __func__, ret);
		return ret;
	}

	/* Save LC_NOTE desciptions for any additional notes to be included */
	if (current_core->kcc_cb.kcc_coredump_save_note_descriptions != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_descriptions(context.core_refcon, coredump_save_note_description, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_descriptions failed with %d\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_notes_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_note_descriptions returned without all note descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_notes_remaining, context.core_note_count);
		return KERN_FAILURE;
	}

	/*
	 * Save the thread commands/state
	 *
	 * TODO: Should this buffer be allocated at boot rather than on the stack?
	 */
	if (context.core_thread_state_size) {
		/* VLA scratch buffer the callback fills per-thread before each write. */
		char threadstatebuf[context.core_thread_state_size];
		ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
		    &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : save_thread_state failed with %d\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_threads_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_threads_remaining, context.core_thread_count);
		return KERN_FAILURE;
	}
	/* At this point the full Mach-O header must have been written. */
	assert(context.core_cur_hoffset == context.core_header_size);

	/* Zero fill between the end of the header and the beginning of the segment data file offset */
	ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
		    context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
		return ret;
	}

	/* Reset our local current file offset before we start writing out segment data */
	context.core_cur_foffset = round_page(context.core_header_size);

	ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
		return ret;
	}

	if (context.core_segment_bytes_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
		    context.core_segment_bytes_remaining, context.core_segment_byte_total);
		return KERN_FAILURE;
	}

	/* Save out the LC_NOTE segment data, starting with the binary info / sw vers one */
	if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_sw_vers_detail(context.core_refcon, coredump_save_sw_vers, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers_detail_cb failed with 0x%x\n", __func__, ret);
			return ret;
		}
	} else {
		/* Legacy path: kcc_coredump_save_sw_vers is deprecated but still supported. */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers_legacy, &context);
#pragma clang diagnostic pop
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers failed with 0x%x\n", __func__, ret);
			return ret;
		}
	}

	if (current_core->kcc_cb.kcc_coredump_save_note_data != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_data(context.core_refcon, coredump_save_note_data, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data failed with 0x%x\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_note_bytes_remaining != 0) {
		kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data returned without all note data written, %llu of %llu remaining\n",
		    __func__, context.core_note_bytes_remaining, context.core_note_bytes_total);
		return KERN_FAILURE;
	}


	/* Flush the last data out */
	ret = kdp_core_output(context.core_outvars, 0, NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
		    context.core_outvars, ret);
		return ret;
	}

	kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.",
	    current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
	    (context.core_thread_count * context.core_thread_state_size), context.core_file_length);

#if CONFIG_CPU_COUNTERS
	end_cycles = mt_cur_cpu_cycles();
	kern_coredump_log(&context, "\nCore dump took %llu cycles\n", end_cycles - start_cycles);
#endif // CONFIG_CPU_COUNTERS

	if (core_begin_offset) {
		/* If we're writing to disk (we have a begin offset), we need to update the header */
		ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed, details_flags);
		if (ret != KERN_SUCCESS) {
			/* Signal the caller that the raw-file header may now be inconsistent. */
			*header_update_failed = TRUE;
			kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
			return ret;
		}
	}

	kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);

	*core_file_length = context.core_file_length_compressed;

	return KERN_SUCCESS;
}
1045 
1046 /*
1047  * Collect coprocessor and userspace coredumps
1048  */
1049 static kern_return_t
kern_do_auxiliary_coredump(void * core_outvars,struct kern_coredump_core * list,uint64_t * last_file_offset,uint64_t details_flags)1050 kern_do_auxiliary_coredump(void * core_outvars, struct kern_coredump_core * list, uint64_t * last_file_offset, uint64_t details_flags)
1051 {
1052 	struct kern_coredump_core *current_core = list;
1053 	uint64_t prev_core_length = 0;
1054 	boolean_t header_update_failed = FALSE;
1055 	kern_coredump_type_t type = current_core == kern_userspace_coredump_core_list ? USERSPACE_COREDUMP : COPROCESSOR_COREDUMP;
1056 	kern_return_t ret = KERN_SUCCESS;
1057 	kern_return_t cur_ret = KERN_SUCCESS;
1058 
1059 	if (type == USERSPACE_COREDUMP && kdp_lck_mtx_lock_spin_is_acquired(&kern_userspace_coredump_core_list_lock)) {
1060 		// Userspace coredump list was being modified at the time of the panic. Skip collecting userspace coredumps
1061 		kern_coredump_log(NULL, "Skipping userspace coredump, coredump list is locked\n");
1062 		return KERN_FAILURE;
1063 	}
1064 
1065 	while (current_core) {
1066 		/* Seek to the beginning of the next file */
1067 		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
1068 		if (cur_ret != KERN_SUCCESS) {
1069 			kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
1070 			return KERN_FAILURE;
1071 		}
1072 
1073 		cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed, type, details_flags);
1074 		if (cur_ret != KERN_SUCCESS) {
1075 			// As long as we didn't fail while updating the header for the raw file, we should be able to try
1076 			// to capture other corefiles.
1077 			if (header_update_failed) {
1078 				// The header may be in an inconsistent state, so bail now
1079 				return KERN_FAILURE;
1080 			} else {
1081 				// Try to capture other corefiles even if one failed, update the overall return
1082 				// status though
1083 				prev_core_length = 0;
1084 				ret = KERN_FAILURE;
1085 			}
1086 		}
1087 
1088 		/* Calculate the offset of the beginning of the next core in the raw file */
1089 		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1090 		prev_core_length = 0;
1091 		current_core = current_core->kcc_next;
1092 	}
1093 
1094 	return ret;
1095 }
1096 
1097 kern_return_t
kern_do_coredump(void * core_outvars,boolean_t kernel_only,uint64_t first_file_offset,uint64_t * last_file_offset,uint64_t details_flags)1098 kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_offset, uint64_t *last_file_offset, uint64_t details_flags)
1099 {
1100 	uint64_t prev_core_length = 0;
1101 	kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
1102 	boolean_t header_update_failed = FALSE;
1103 
1104 	assert(last_file_offset != NULL);
1105 
1106 	*last_file_offset = first_file_offset;
1107 	cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &header_update_failed, XNU_COREDUMP, details_flags);
1108 	if (cur_ret != KERN_SUCCESS) {
1109 		// As long as we didn't fail while updating the header for the raw file, we should be able to try
1110 		// to capture other corefiles.
1111 		if (header_update_failed) {
1112 			// The header may be in an inconsistent state, so bail now
1113 			return KERN_FAILURE;
1114 		} else {
1115 			prev_core_length = 0;
1116 			ret = KERN_FAILURE;
1117 		}
1118 	}
1119 
1120 	*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1121 
1122 	if (kernel_only) {
1123 		return ret;
1124 	}
1125 
1126 	/* Dump secure kernel if allowed */
1127 	if (sk_helper) {
1128 		/* Seek to the beginning of next file. */
1129 		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
1130 		if (cur_ret != KERN_SUCCESS) {
1131 			kern_coredump_log(NULL, "secure_core: Unable to seek to the start of file: %d\n", cur_ret);
1132 			return KERN_FAILURE;
1133 		}
1134 
1135 		/* Dump the secure core to disk. */
1136 		cur_ret = kern_coredump_routine(core_outvars, sk_helper, *last_file_offset, &prev_core_length, &header_update_failed, SECURE_COREDUMP, details_flags);
1137 		if (cur_ret != KERN_SUCCESS) {
1138 			if (header_update_failed) {
1139 				return KERN_FAILURE;
1140 			} else {
1141 				prev_core_length = 0;
1142 				ret = KERN_FAILURE;
1143 			}
1144 		}
1145 
1146 		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1147 	}
1148 
1149 	// Collect coprocessor coredumps first, in case userspace coredumps fail
1150 	ret = kern_do_auxiliary_coredump(core_outvars, kern_coredump_core_list, last_file_offset, details_flags);
1151 	if (ret != KERN_SUCCESS) {
1152 		kern_coredump_log(NULL, "Failed to dump coprocessor cores\n");
1153 		return ret;
1154 	}
1155 
1156 	ret = kern_do_auxiliary_coredump(core_outvars, kern_userspace_coredump_core_list, last_file_offset, details_flags);
1157 	if (ret != KERN_SUCCESS) {
1158 		kern_coredump_log(NULL, "Failed to dump userspace process cores\n");
1159 		return ret;
1160 	}
1161 
1162 	return KERN_SUCCESS;
1163 }
1164 #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1165 
1166 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)1167 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks, void* refcon,
1168     const char *core_description, boolean_t is64bit, uint32_t mh_magic,
1169     cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
1170 {
1171 #pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
1172 	return KERN_NOT_SUPPORTED;
1173 }
1174 
1175 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)1176 kern_register_userspace_coredump(task_t task, const char * name)
1177 {
1178 	(void)task;
1179 	(void)name;
1180 	return KERN_NOT_SUPPORTED;
1181 }
1182 
1183 kern_return_t
kern_unregister_userspace_coredump(task_t task)1184 kern_unregister_userspace_coredump(task_t task)
1185 {
1186 	(void)task;
1187 	return KERN_NOT_SUPPORTED;
1188 }
1189 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1190 
1191 /*
1192  * Must be callable with a NULL context
1193  */
1194 void
kern_coredump_log(void * context,const char * string,...)1195 kern_coredump_log(void *context, const char *string, ...)
1196 {
1197 #pragma unused(context)
1198 	va_list coredump_log_args;
1199 
1200 	va_start(coredump_log_args, string);
1201 	_doprnt(string, &coredump_log_args, consdebug_putc, 16);
1202 	va_end(coredump_log_args);
1203 
1204 #if defined(__arm64__)
1205 	paniclog_flush();
1206 #endif
1207 }
1208