xref: /xnu-12377.81.4/osfmk/kdp/processor_core.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <kdp/kdp_core.h>
30 #include <kdp/processor_core.h>
31 #include <kdp/core_notes.h>
32 #include <kern/assert.h>
33 #include <kern/monotonic.h>
34 #include <kern/zalloc.h>
35 #include <libkern/kernel_mach_header.h>
36 #include <libkern/OSAtomic.h>
37 #include <libsa/types.h>
38 #include <pexpert/pexpert.h>
39 #include <vm/vm_map.h>
40 
41 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
42 
/*
 * Round x up to the next multiple of y.
 * NOTE: function-like macro — both arguments are evaluated more than once,
 * so avoid passing expressions with side effects.
 */
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
	                (x) : ((x) + ((y) - ((x) % (y)))))

#define DATA_OWNER_LEGACY_BIN_SPEC "kern ver str"
/*
 * Format of the legacy bin spec (LC_IDENT-like) LC_NOTE payload as expected by LLDB
 */
typedef struct {
	uint32_t version; // currently 1 (LEGACY_BIN_SPEC_VERSION)
	char version_string[KERN_COREDUMP_VERSIONSTRINGMAXSIZE];
} __attribute__((packed)) legacy_bin_spec;
#define LEGACY_BIN_SPEC_VERSION 1

/*
 * Maps each coredump type to a MAIN_BIN_SPEC_TYPE_* value; secure and
 * coprocessor cores are both treated as standalone binaries.
 * (Presumably consumed when emitting the main bin spec LC_NOTE — the use
 * site is outside this chunk.)
 */
static uint32_t bin_spec_map[NUM_COREDUMP_TYPES] = {
	[XNU_COREDUMP] = MAIN_BIN_SPEC_TYPE_KERNEL,
	[USERSPACE_COREDUMP] = MAIN_BIN_SPEC_TYPE_USER,
	[COPROCESSOR_COREDUMP] = MAIN_BIN_SPEC_TYPE_STANDALONE,
	[SECURE_COREDUMP] = MAIN_BIN_SPEC_TYPE_STANDALONE
};
62 
/*
 * The processor_core_context structure describes the current
 * corefile that's being generated. It also includes a pointer
 * to the core_outvars which is used by the KDP code for context
 * about the specific output mechanism being used.
 *
 * We include *remaining variables to catch inconsistencies / bugs
 * in the co-processor coredump callbacks: each save_* callback
 * decrements the relevant *_remaining counter and errors out when a
 * callback is invoked more times than the summary promised.
 */
typedef struct {
	struct kdp_core_out_vars * core_outvars;     /* Output procedure info (see kdp_out_stage.h) */
	kern_coredump_callback_config *core_config;  /* Information about core currently being dumped */
	void *core_refcon;                           /* Reference constant associated with the coredump helper */
	boolean_t core_should_be_skipped;            /* Indicates whether this specific core should not be dumped */
	boolean_t core_is64bit;                      /* Bitness of CPU */
	kern_coredump_type_t core_type;              /* Indicates type of this core */
	uint32_t core_mh_magic;                      /* Magic for mach header */
	cpu_type_t core_cpu_type;                    /* CPU type for mach header */
	cpu_subtype_t core_cpu_subtype;              /* CPU subtype for mach header */
	uint64_t core_file_length;                   /* Overall corefile length including any zero padding */
	uint64_t core_file_length_compressed;        /* File length after compression */
	uint64_t core_segment_count;                 /* Number of LC_SEGMENTs in the core currently being dumped */
	uint64_t core_segments_remaining;            /* Number of LC_SEGMENTs that have not been added to the header */
	uint64_t core_segment_byte_total;            /* Sum of all the data from the LC_SEGMENTS in the core */
	uint64_t core_segment_bytes_remaining;       /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
	uint64_t core_thread_count;                  /* Number of LC_THREADs to be included */
	uint64_t core_threads_remaining;             /* Number of LC_THREADs that have yet to be included */
	uint64_t core_thread_state_size;             /* Size of each LC_THREAD */
	uint64_t core_note_count;                    /* Number of LC_NOTEs to be included */
	uint64_t core_notes_remaining;               /* Number of LC_NOTEs that have not been added to the header */
	uint64_t core_note_bytes_total;              /* Sum of all data from the LC_NOTE segments in the core */
	uint64_t core_note_bytes_remaining;          /* Quantity of data remaining from LC_NOTEs that have yet to be added */
	uint64_t core_cur_hoffset;                   /* Current offset in this core's header */
	uint64_t core_cur_foffset;                   /* Current offset in this core's overall file */
	uint64_t core_header_size;                   /* Size of this core's header */
	uint64_t core_total_bytes;                   /* Total amount of data to be included in this core (excluding zero fill) */
	const char *core_name;                       /* Name of corefile being produced */
} processor_core_context;
101 
/*
 * The kern_coredump_core structure describes a core that has been
 * registered for use by the coredump mechanism.
 */
struct kern_coredump_core {
	struct kern_coredump_core *kcc_next;             /* Next processor to dump */
	void *kcc_refcon;                                /* Reference constant to be passed to callbacks */
	char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor */
	boolean_t kcc_is64bit;                           /* Processor bitness */
	uint32_t kcc_mh_magic;                           /* Magic for mach header */
	cpu_type_t kcc_cpu_type;                         /* CPU type for mach header */
	cpu_subtype_t kcc_cpu_subtype;                   /* CPU subtype for mach header */
	kern_coredump_callback_config kcc_cb;            /* Registered processor callbacks for coredump */
};

/* Coprocessor helper list; pushed onto lock-free via OSCompareAndSwapPtr. */
struct kern_coredump_core * kern_coredump_core_list = NULL;
/* Userspace-task helper list; protected by the mutex declared below. */
struct kern_coredump_core * kern_userspace_coredump_core_list = NULL;
LCK_GRP_DECLARE(kern_userspace_coredump_core_list_lock_grp, "userspace coredump list");
LCK_MTX_DECLARE(kern_userspace_coredump_core_list_lock, &kern_userspace_coredump_core_list_lock_grp);

/* Signature of the deprecated legacy ("kern ver str") software-version callback. */
typedef kern_return_t (*legacy_sw_vers_registered_cb)(void *refcon, core_save_sw_vers_cb callback, void *context);

/* Count of registered helpers of all types (atomically maintained). */
uint32_t coredump_registered_count = 0;

struct kern_coredump_core *kernel_helper = NULL; /* The single XNU_COREDUMP helper */
struct kern_coredump_core *sk_helper = NULL;     /* The single SECURE_COREDUMP helper */
128 
129 static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,kern_coredump_type_t type,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)130 kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
131     void *refcon, const char *core_description, kern_coredump_type_t type, boolean_t is64bit,
132     uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
133 {
134 	struct kern_coredump_core *core_helper = NULL;
135 	kern_coredump_callback_config *core_callbacks = NULL;
136 
137 	if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) {
138 		return NULL;
139 	}
140 	if (kc_callbacks == NULL) {
141 		return NULL;
142 	}
143 	;
144 	if (core_description == NULL) {
145 		return NULL;
146 	}
147 
148 	if (kc_callbacks->kcc_coredump_get_summary == NULL ||
149 	    kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
150 	    kc_callbacks->kcc_coredump_save_segment_data == NULL ||
151 	    kc_callbacks->kcc_coredump_save_thread_state == NULL) {
152 		return NULL;
153 	}
154 
155 #pragma clang diagnostic push
156 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
157 	legacy_sw_vers_registered_cb legacy_vers_callback = kc_callbacks->kcc_coredump_save_sw_vers;
158 #pragma clang diagnostic pop
159 
160 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
161 		if (legacy_vers_callback == NULL &&
162 		    kc_callbacks->kcc_coredump_save_sw_vers_detail == NULL) {
163 			return NULL;
164 		}
165 	} else {
166 		if (legacy_vers_callback == NULL) {
167 			return NULL;
168 		}
169 	}
170 
171 
172 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
173 		/* Either all note related callbacks should be set or none should be set */
174 		if ((kc_callbacks->kcc_coredump_save_note_summary == NULL) != (kc_callbacks->kcc_coredump_save_note_descriptions == NULL)) {
175 			return NULL;
176 		}
177 		if ((kc_callbacks->kcc_coredump_save_note_descriptions == NULL) != (kc_callbacks->kcc_coredump_save_note_data == NULL)) {
178 			return NULL;
179 		}
180 	}
181 
182 
183 #if !defined(__LP64__)
184 	/* We don't support generating 64-bit cores on 32-bit platforms */
185 	if (is64bit) {
186 		return NULL;
187 	}
188 #endif
189 
190 	core_helper = zalloc_permanent_type(struct kern_coredump_core);
191 	core_helper->kcc_next = NULL;
192 	core_helper->kcc_refcon = refcon;
193 	if (type == XNU_COREDUMP || type == USERSPACE_COREDUMP || type == SECURE_COREDUMP) {
194 		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
195 	} else {
196 		assert(type == COPROCESSOR_COREDUMP);
197 		/* Make sure there's room for the -cp suffix (16 - NULL char - strlen(-cp)) */
198 		snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.12s-cp", core_description);
199 	}
200 	core_helper->kcc_is64bit = is64bit;
201 	core_helper->kcc_mh_magic = mh_magic;
202 	core_helper->kcc_cpu_type = cpu_type;
203 	core_helper->kcc_cpu_subtype = cpu_subtype;
204 	core_callbacks = &core_helper->kcc_cb;
205 
206 	core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
207 	core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
208 	core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
209 	core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
210 	core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
211 #pragma clang diagnostic push
212 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
213 	core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;
214 #pragma clang diagnostic pop
215 
216 
217 	if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
218 		core_callbacks->kcc_coredump_save_note_summary = kc_callbacks->kcc_coredump_save_note_summary;
219 		core_callbacks->kcc_coredump_save_note_descriptions = kc_callbacks->kcc_coredump_save_note_descriptions;
220 		core_callbacks->kcc_coredump_save_note_data = kc_callbacks->kcc_coredump_save_note_data;
221 		core_callbacks->kcc_coredump_save_sw_vers_detail = kc_callbacks->kcc_coredump_save_sw_vers_detail;
222 	}
223 
224 	if (type == XNU_COREDUMP) {
225 		assert(kernel_helper == NULL);
226 		kernel_helper = core_helper;
227 	} else if (type == SECURE_COREDUMP) {
228 		assert(sk_helper == NULL);
229 		sk_helper = core_helper;
230 	} else if (type == USERSPACE_COREDUMP) {
231 		lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
232 		core_helper->kcc_next = kern_userspace_coredump_core_list;
233 		kern_userspace_coredump_core_list = core_helper;
234 		lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
235 	} else {
236 		assert(type == COPROCESSOR_COREDUMP);
237 		do {
238 			core_helper->kcc_next = kern_coredump_core_list;
239 		} while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
240 	}
241 
242 	OSAddAtomic(1, &coredump_registered_count);
243 	kprintf("Registered coredump handler for %s\n", core_description);
244 
245 	return core_helper;
246 }
247 
248 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)249 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
250     void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
251     cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
252 {
253 	if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) {
254 		return KERN_RESOURCE_SHORTAGE;
255 	}
256 
257 	if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, COPROCESSOR_COREDUMP,
258 	    is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) {
259 		return KERN_INVALID_ARGUMENT;
260 	}
261 
262 	return KERN_SUCCESS;
263 }
264 
265 kern_return_t
kern_register_xnu_coredump_helper(kern_coredump_callback_config * kc_callbacks)266 kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
267 {
268 #if defined(__LP64__)
269 	boolean_t is64bit = TRUE;
270 #else
271 	boolean_t is64bit = FALSE;
272 #endif
273 
274 	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", XNU_COREDUMP, is64bit,
275 	    _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
276 		return KERN_FAILURE;
277 	}
278 
279 	return KERN_SUCCESS;
280 }
281 
282 kern_return_t
kern_register_sk_coredump_helper(kern_coredump_callback_config * sk_callbacks,void * refcon)283 kern_register_sk_coredump_helper(kern_coredump_callback_config *sk_callbacks, void *refcon)
284 {
285 	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, sk_callbacks,
286 	    refcon, "secure-kernel", SECURE_COREDUMP, TRUE, _mh_execute_header.magic,
287 	    _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
288 		return KERN_FAILURE;
289 	}
290 
291 	return KERN_SUCCESS;
292 }
293 
/* Defined in BSD: returns the CPU type of the process backing bsd_info. */
extern cpu_type_t
process_cpu_type(void * bsd_info);

/*
 * NOTE(review): declared as returning cpu_type_t although it reports the
 * CPU subtype — confirm against the BSD-side definition before changing.
 */
extern cpu_type_t
process_cpu_subtype(void * bsd_info);

extern char     *proc_name_address(void *p);
301 
302 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)303 kern_register_userspace_coredump(task_t task, const char * name)
304 {
305 	kern_return_t result;
306 	struct kern_userspace_coredump_context * context = NULL;
307 	boolean_t is64bit;
308 	uint32_t mh_magic;
309 	uint32_t mh_cputype;
310 	uint32_t mh_cpusubtype;
311 	kern_coredump_callback_config userkc_callbacks;
312 
313 	is64bit = task_has_64Bit_addr(task);
314 	mh_magic = is64bit ? MH_MAGIC_64 : MH_MAGIC;
315 	mh_cputype = process_cpu_type(get_bsdtask_info(task));
316 	mh_cpusubtype = process_cpu_subtype(get_bsdtask_info(task));
317 
318 
319 	context = kalloc_type(struct kern_userspace_coredump_context, (zalloc_flags_t)(Z_WAITOK | Z_ZERO));
320 	context->task = task;
321 
322 	userkc_callbacks.kcc_coredump_init = user_dump_init;
323 	userkc_callbacks.kcc_coredump_get_summary = user_dump_save_summary;
324 	userkc_callbacks.kcc_coredump_save_segment_descriptions = user_dump_save_seg_descriptions;
325 	userkc_callbacks.kcc_coredump_save_thread_state = user_dump_save_thread_state;
326 	userkc_callbacks.kcc_coredump_save_sw_vers_detail = user_dump_save_sw_vers_detail;
327 	userkc_callbacks.kcc_coredump_save_segment_data = user_dump_save_segment_data;
328 	userkc_callbacks.kcc_coredump_save_note_summary = user_dump_save_note_summary;
329 	userkc_callbacks.kcc_coredump_save_note_descriptions = user_dump_save_note_descriptions;
330 	userkc_callbacks.kcc_coredump_save_note_data = user_dump_save_note_data;
331 
332 	if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, &userkc_callbacks, context, name, USERSPACE_COREDUMP, is64bit,
333 	    mh_magic, mh_cputype, mh_cpusubtype) == NULL) {
334 		result = KERN_FAILURE;
335 		goto finish;
336 	}
337 
338 	result = KERN_SUCCESS;
339 
340 finish:
341 	if (result != KERN_SUCCESS && context != NULL) {
342 		kfree_type(struct kern_userspace_coredump_context, context);
343 	}
344 
345 	return result;
346 }
347 
348 kern_return_t
kern_unregister_userspace_coredump(task_t task)349 kern_unregister_userspace_coredump(task_t task)
350 {
351 	struct kern_coredump_core * current_core = NULL;
352 	struct kern_coredump_core * previous_core = NULL;
353 
354 	lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
355 	current_core = kern_userspace_coredump_core_list;
356 	while (current_core) {
357 		struct kern_userspace_coredump_context * context = (struct kern_userspace_coredump_context *)current_core->kcc_refcon;
358 		assert(context != NULL);
359 		if (context->task == task) {
360 			/* remove current_core from the list */
361 			if (previous_core == NULL) {
362 				kern_userspace_coredump_core_list = current_core->kcc_next;
363 			} else {
364 				previous_core->kcc_next = current_core->kcc_next;
365 			}
366 			break;
367 		}
368 		previous_core = current_core;
369 		current_core = current_core->kcc_next;
370 	}
371 	lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
372 
373 	if (current_core) {
374 		kfree_type(struct kern_userspace_coredump_context, current_core->kcc_refcon);
375 		OSAddAtomic(-1, &coredump_registered_count);
376 		return KERN_SUCCESS;
377 	}
378 
379 	return KERN_NOT_FOUND;
380 }
381 
382 /*
383  * Save LC_NOTE metadata about the core we are going to write before we write the mach header
384  */
385 static int
coredump_save_note_summary(uint64_t core_note_count,uint64_t core_note_byte_count,void * context)386 coredump_save_note_summary(uint64_t core_note_count, uint64_t core_note_byte_count, void *context)
387 {
388 	processor_core_context *core_context = (processor_core_context *)context;
389 
390 	if (!core_note_count || !core_note_byte_count || !context) {
391 		return KERN_INVALID_ARGUMENT;
392 	}
393 
394 	core_context->core_note_count = core_context->core_notes_remaining = core_note_count;
395 	core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = core_note_byte_count;
396 
397 	return KERN_SUCCESS;
398 }
399 
/*
 * Save metadata about the core we're about to write, write out the mach header
 *
 * Validates the promised counts, seeds all the *_remaining accounting in the
 * core context, sizes the load-command area, resets the output stream to the
 * computed file length, and emits the (32- or 64-bit) mach header. Marks the
 * core as skipped (without error) if the output layer asks to skip it.
 */
static int
coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
    uint64_t thread_count, uint64_t thread_state_size,
    __unused uint64_t misc_bytes_count, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	uint32_t sizeofcmds = 0, numcmds = 0;
	bool should_skip = false;
	int ret = 0;

	if (!core_segment_count || !core_byte_count
	    || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * secure coredumps and coprocessor coredumps aren't required to contain any thread state,
	 * because it's reconstructed during the lldb session
	 */
	if (core_context->core_type != SECURE_COREDUMP && core_context->core_type != COPROCESSOR_COREDUMP
	    && (!thread_count || !thread_state_size)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Initialize core_context */
	core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
	core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
	core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
	core_context->core_thread_state_size = thread_state_size;

	/* Account for the LC_NOTE needed to store version/load information */
	core_context->core_note_count = core_context->core_notes_remaining = (core_context->core_note_count + 1);
	/* New-style helpers emit a main bin spec note; legacy ones a "kern ver str" note */
	size_t vers_note_length = sizeof(main_bin_spec_note_t);
	if (core_context->core_config->kcc_coredump_save_sw_vers_detail == NULL) {
		vers_note_length = sizeof(legacy_bin_spec);
	}
	core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = (core_context->core_note_bytes_total + vers_note_length);

#if defined(__LP64__)
	if (core_context->core_is64bit) {
		/* Load-command area: one LC_SEGMENT_64 per segment, thread state, one LC_NOTE per note */
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    (core_context->core_note_count * sizeof(struct note_command)));
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
	} else
#endif /* defined(__LP64__) */
	{
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    (core_context->core_note_count * sizeof(struct note_command)));
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
	}


	core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
	/* The header is padded to a page boundary; segment/note data follows it */
	core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
	core_context->core_cur_foffset = round_page(core_context->core_header_size);

	numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + core_context->core_note_count);

	/*
	 * Reset the zstream and other output context before writing any data out. We do this here
	 * to update the total file length on the outvars before we start writing out.
	 */
	ret = kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length, true, &should_skip,
	    core_context->core_name, core_context->core_type);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s() : failed to reset the out vars : kdp_reset_output_vars(%p, %llu, true, %p) returned error 0x%x\n",
		    __func__, core_context->core_outvars, core_context->core_file_length, &should_skip, ret);
		return ret;
	}

	/* The output layer may elect to skip this core entirely; that is not an error */
	if (should_skip) {
		core_context->core_should_be_skipped = TRUE;
		return KERN_SUCCESS;
	}

	/* Construct core file header */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct mach_header_64 core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret =  kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	} else
#endif /* defined(__LP64__) */
	{
		struct mach_header core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret =  kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	}

	return KERN_SUCCESS;
}
528 
/*
 * Construct a segment command for the specified segment.
 *
 * Emits one LC_SEGMENT (or LC_SEGMENT_64) load command covering
 * [seg_start, seg_end) at the current file offset, then advances the
 * header offset, the file offset, and the remaining-segment count.
 * Errors if called more times than promised by the summary, or if the
 * load-command area would overflow.
 */
static int
coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
    void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	int ret;
	uint64_t size = seg_end - seg_start;

	if (seg_end <= seg_start) {
		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
		    __func__, seg_start, seg_end, context, seg_start, seg_end);
		return KERN_INVALID_ARGUMENT;
	}

	if (core_context->core_segments_remaining == 0) {
		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n",
		    __func__, seg_start, seg_end, context, core_context->core_segment_count);
		return KERN_INVALID_ARGUMENT;
	}

	/* Construct segment command */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct segment_command_64 seg_command = { };

		/* Guard against overflowing the load-command area sized in the summary */
		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT_64;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = seg_start;
		seg_command.vmsize = size;
		seg_command.fileoff = core_context->core_cur_foffset;
		seg_command.filesize = size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. kdp_core_output(%p, %lu, %p) returned error %d\n",
			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	} else
#endif /* defined(__LP64__) */
	{
		struct segment_command seg_command = { };

		/* 32-bit commands cannot describe addresses above 4GB */
		if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
			    __func__, seg_start, seg_end, context, seg_start, seg_end);
			return KERN_INVALID_ARGUMENT;
		}

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0;
		seg_command.vmaddr = (uint32_t) seg_start;
		seg_command.vmsize = (uint32_t) size;
		seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
		seg_command.filesize = (uint32_t) size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned  error 0x%x\n",
			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	}

	/* Update coredump context */
	core_context->core_segments_remaining--;
	core_context->core_cur_foffset += size;

	return KERN_SUCCESS;
}
628 
629 /*
630  * Construct a LC_NOTE command for the specified note
631  */
632 static int
coredump_save_note_description(const char * data_owner,uint64_t length,void * context)633 coredump_save_note_description(const char * data_owner, uint64_t length, void *context)
634 {
635 	processor_core_context *core_context = (processor_core_context *)context;
636 	int ret;
637 
638 	if (data_owner == NULL || (strlen(data_owner) == 0)) {
639 		kern_coredump_log(context, "%s() called with invalid data_owner\n", __func__);
640 		return KERN_INVALID_ARGUMENT;
641 	}
642 
643 	if (core_context->core_notes_remaining == 0) {
644 		kern_coredump_log(context, "%s() called too many times, %llu note descriptions already recorded\n",
645 		    __func__, core_context->core_note_count);
646 		return KERN_INVALID_ARGUMENT;
647 	}
648 
649 	struct note_command note = { .cmd = LC_NOTE,
650 		                     .cmdsize = sizeof(struct note_command),
651 		                     .offset = core_context->core_cur_foffset,
652 		                     .size = length, };
653 	strlcpy((char *) &note.data_owner, data_owner, sizeof(note.data_owner));
654 
655 	/* Flush new command to output */
656 	ret = kdp_core_output(core_context->core_outvars, sizeof(note), (caddr_t)&note);
657 	if (ret != KERN_SUCCESS) {
658 		kern_coredump_log(context, "%s() : failed to write note %llu of %llu : kdp_core_output() returned  error 0x%x\n",
659 		    __func__, core_context->core_note_count - core_context->core_notes_remaining,
660 		    core_context->core_note_count, ret);
661 		return ret;
662 	}
663 
664 	/* Update coredump context */
665 	core_context->core_cur_foffset += length;
666 	core_context->core_cur_hoffset += sizeof(note);
667 	core_context->core_notes_remaining--;
668 
669 	return KERN_SUCCESS;
670 }
671 
672 /*
673  * Save thread state.
674  *
675  * Passed thread_state is expected to be a struct thread_command
676  */
677 static int
coredump_save_thread_state(void * thread_state,void * context)678 coredump_save_thread_state(void *thread_state, void *context)
679 {
680 	processor_core_context *core_context = (processor_core_context *)context;
681 	struct thread_command *tc = (struct thread_command *)thread_state;
682 	int ret;
683 
684 	if (tc->cmd != LC_THREAD) {
685 		kern_coredump_log(context, "%s() : found %d expected LC_THREAD (%d)\n", __func__, tc->cmd, LC_THREAD);
686 		return KERN_INVALID_ARGUMENT;
687 	}
688 
689 	if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
690 		kern_coredump_log(context, "%s() : ran out of space to save threads with %llu of %llu remaining\n", __func__,
691 		    core_context->core_threads_remaining, core_context->core_thread_count);
692 		return KERN_NO_SPACE;
693 	}
694 
695 	ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
696 	if (ret != KERN_SUCCESS) {
697 		kern_coredump_log(context, "%s() : failed to write thread data : kdp_core_output() returned 0x%x\n", __func__, ret);
698 		return ret;
699 	}
700 
701 	core_context->core_threads_remaining--;
702 	core_context->core_cur_hoffset += core_context->core_thread_state_size;
703 
704 	return KERN_SUCCESS;
705 }
706 
707 static int
coredump_save_segment_data(void * seg_data,uint64_t length,void * context)708 coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
709 {
710 	int ret;
711 	processor_core_context *core_context = (processor_core_context *)context;
712 
713 	if (length > core_context->core_segment_bytes_remaining) {
714 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
715 		    seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
716 		    core_context->core_segment_bytes_remaining);
717 		return KERN_INVALID_ARGUMENT;
718 	}
719 
720 	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
721 	if (ret != KERN_SUCCESS) {
722 		kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
723 		    core_context->core_segment_bytes_remaining, ret);
724 		return ret;
725 	}
726 
727 	core_context->core_segment_bytes_remaining -= length;
728 	core_context->core_cur_foffset += length;
729 
730 	return KERN_SUCCESS;
731 }
732 
733 static int
coredump_save_note_data(void * note_data,uint64_t length,void * context)734 coredump_save_note_data(void *note_data, uint64_t length, void *context)
735 {
736 	int ret;
737 	processor_core_context *core_context = (processor_core_context *)context;
738 
739 	if (length > core_context->core_note_bytes_remaining) {
740 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
741 		    note_data, length, context, core_context->core_note_bytes_total - core_context->core_note_bytes_remaining,
742 		    core_context->core_note_bytes_remaining);
743 		return KERN_INVALID_ARGUMENT;
744 	}
745 
746 	ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)note_data);
747 	if (ret != KERN_SUCCESS) {
748 		kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
749 		    core_context->core_note_bytes_remaining, ret);
750 		return ret;
751 	}
752 
753 	core_context->core_note_bytes_remaining -= length;
754 	core_context->core_cur_foffset += length;
755 
756 	return KERN_SUCCESS;
757 }
758 
759 static int
coredump_save_sw_vers_legacy(void * sw_vers,uint64_t length,void * context)760 coredump_save_sw_vers_legacy(void *sw_vers, uint64_t length, void *context)
761 {
762 	processor_core_context *core_context = (processor_core_context *)context;
763 	int ret;
764 
765 	if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
766 		kern_coredump_log(context, "%s(%p, %llu, %p) : called with invalid length %llu\n", __func__,
767 		    sw_vers, length, context, length);
768 		return KERN_INVALID_ARGUMENT;
769 	}
770 
771 	uint32_t version = LEGACY_BIN_SPEC_VERSION;
772 	ret = coredump_save_note_data(&version, sizeof(version), context);
773 	if (ret != KERN_SUCCESS) {
774 		kern_coredump_log(context, "%s() : failed to write legacy bin spec version : coredump_save_note_data() returned 0x%x\n",
775 		    __func__, ret);
776 		return ret;
777 	}
778 
779 	ret = coredump_save_note_data(sw_vers, length, context);
780 	if (ret != KERN_SUCCESS) {
781 		kern_coredump_log(context, "%s() : failed to write sw_vers string : coredump_save_note_data() returned 0x%x\n",
782 		    __func__, ret);
783 		return ret;
784 	}
785 
786 	if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
787 		/* Zero fill to the full size */
788 		uint64_t length_to_zero = (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length);
789 		ret = kdp_core_output(core_context->core_outvars, length_to_zero, NULL);
790 		if (ret != KERN_SUCCESS) {
791 			kern_coredump_log(context, "%s() : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
792 			    __func__, core_context->core_outvars, length_to_zero, ret);
793 			return ret;
794 		}
795 
796 		core_context->core_note_bytes_remaining -= length_to_zero;
797 		core_context->core_cur_foffset += length_to_zero;
798 	}
799 
800 	return KERN_SUCCESS;
801 }
802 
803 static int
coredump_save_sw_vers(uint64_t address,uuid_t uuid,uint32_t log2_pagesize,void * context)804 coredump_save_sw_vers(uint64_t address, uuid_t uuid, uint32_t log2_pagesize, void *context)
805 {
806 	processor_core_context *core_context = (processor_core_context *)context;
807 	int ret;
808 
809 	uint32_t type = bin_spec_map[core_context->core_type];
810 	main_bin_spec_note_t spec = {
811 		.version = MAIN_BIN_SPEC_VERSION,
812 		.type = type,
813 		.address = address,
814 		.log2_pagesize = log2_pagesize,
815 	};
816 	uuid_copy(*((uuid_t *)&spec.uuid), uuid);
817 
818 	ret = coredump_save_note_data(&spec, sizeof(spec), context);
819 	if (ret != KERN_SUCCESS) {
820 		kern_coredump_log(context, "%s() : failed to write main bin spec structure : coredump_save_note_data() returned 0x%x\n", __func__, ret);
821 		return ret;
822 	}
823 
824 	return KERN_SUCCESS;
825 }
826 
/*
 * Write one complete corefile for current_core into the output stream.
 *
 * The file is produced in strict phases, and the offset accounting in the
 * shared context depends on that order:
 *   1. optional kcc_coredump_init (may skip the core with KERN_NODE_DOWN)
 *   2. note summary + overall summary (sizes the header)
 *   3. segment descriptions, then LC_NOTE descriptions, then thread commands
 *      (all header content; must exactly fill core_header_size)
 *   4. zero fill to the next page boundary, then segment data
 *   5. LC_NOTE payloads (bin spec / sw vers first, then extra note data)
 *   6. final flush; if core_begin_offset != 0 (writing to disk), record the
 *      file in the overall header via kern_dump_record_file().
 *
 * On failure, *abort_on_failure is set only when the on-disk header may be
 * inconsistent (record-file failure); other errors let the caller continue
 * with the remaining cores. *core_file_length returns the compressed length.
 */
static kern_return_t
kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *abort_on_failure, kern_coredump_type_t type, uint64_t details_flags)
{
#if CONFIG_CPU_COUNTERS
	uint64_t start_cycles;
	uint64_t end_cycles;
#endif // CONFIG_CPU_COUNTERS
	kern_return_t ret;
	processor_core_context context = { };
	*core_file_length = 0;

#if CONFIG_CPU_COUNTERS
	start_cycles = mt_cur_cpu_cycles();
#endif // CONFIG_CPU_COUNTERS

	/* Setup the coredump context */
	context.core_outvars = core_outvars;
	context.core_config = &current_core->kcc_cb;
	context.core_refcon = current_core->kcc_refcon;
	context.core_is64bit = current_core->kcc_is64bit;
	context.core_mh_magic = current_core->kcc_mh_magic;
	context.core_cpu_type = current_core->kcc_cpu_type;
	context.core_cpu_subtype = current_core->kcc_cpu_subtype;
	context.core_type = type;
	context.core_name = current_core->kcc_corename;

	kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);

	if (current_core->kcc_cb.kcc_coredump_init != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
		if (ret == KERN_NODE_DOWN) {
			/* The helper reports its device unavailable; treated as a clean skip, not an error. */
			kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
			return KERN_SUCCESS;
		} else if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : coredump_init failed with %d\n", __func__, ret);
			return ret;
		}
	}

	/* Retrieve information about LC_NOTE data we will write out as part of the core before we populate the general header */
	if (current_core->kcc_cb.kcc_coredump_save_note_summary != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_summary(context.core_refcon, coredump_save_note_summary, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : save_note_note_summary failed with %d\n", __func__, ret);
			return ret;
		}
	}

	/* Populate the context with metadata about the corefile (cmd info, sizes etc) */
	ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
	if (ret != KERN_SUCCESS && ret != KERN_NODE_DOWN) {
		kern_coredump_log(&context, "(%s) : get_summary failed with %d\n", __func__, ret);
		return ret;
	}

	if (context.core_should_be_skipped) {
		kern_coredump_log(&context, "Skipping coredump\n");
		return KERN_SUCCESS;
	}

	if (context.core_header_size == 0) {
		kern_coredump_log(&context, "(%s) : header size not populated after coredump_get_summary\n", __func__);
		return KERN_FAILURE;
	}

	/* Save the segment descriptions for the segments to be included */
	ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
	    &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : save_segment_descriptions failed with %d\n", __func__, ret);
		return ret;
	}

	/* Every segment promised in the summary must have been described, or the header layout is wrong. */
	if (context.core_segments_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_segments_remaining, context.core_segment_count);
		return KERN_FAILURE;
	}

	/* write out the LC_NOTE with the binary info */
	if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
		ret = coredump_save_note_description(MAIN_BIN_SPEC_DATA_OWNER, sizeof(main_bin_spec_note_t), &context);
	} else {
		/* No detail callback registered: fall back to the legacy bin spec note format. */
		ret = coredump_save_note_description(DATA_OWNER_LEGACY_BIN_SPEC, sizeof(legacy_bin_spec), &context);
	}
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(%s) : coredump_save_note_description returned %d while writing binary info LC_NOTE description", __func__, ret);
		return ret;
	}

	/* Save LC_NOTE desciptions for any additional notes to be included */
	if (current_core->kcc_cb.kcc_coredump_save_note_descriptions != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_descriptions(context.core_refcon, coredump_save_note_description, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_descriptions failed with %d\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_notes_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_note_descriptions returned without all note descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_notes_remaining, context.core_note_count);
		return KERN_FAILURE;
	}

	/*
	 * Save the thread commands/state
	 *
	 * TODO: Should this buffer be allocated at boot rather than on the stack?
	 */
	if (context.core_thread_state_size) {
		/* NOTE(review): VLA on the panic-path stack, sized by the helper's reported state size. */
		char threadstatebuf[context.core_thread_state_size];
		ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
		    &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : save_thread_state failed with %d\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_threads_remaining != 0) {
		kern_coredump_log(&context, "(%s) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
		    __func__, context.core_threads_remaining, context.core_thread_count);
		return KERN_FAILURE;
	}
	/* The header phases above must have consumed exactly the space computed in the summary. */
	assert(context.core_cur_hoffset == context.core_header_size);

	/* Zero fill between the end of the header and the beginning of the segment data file offset */
	ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
		    context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
		return ret;
	}

	/* Reset our local current file offset before we start writing out segment data */
	context.core_cur_foffset = round_page(context.core_header_size);

	ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
		return ret;
	}

	if (context.core_segment_bytes_remaining != 0) {
		kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
		    context.core_segment_bytes_remaining, context.core_segment_byte_total);
		return KERN_FAILURE;
	}

	/* Save out the LC_NOTE segment data, starting with the binary info / sw vers one */
	if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_sw_vers_detail(context.core_refcon, coredump_save_sw_vers, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers_detail_cb failed with 0x%x\n", __func__, ret);
			return ret;
		}
	} else {
		/* Legacy path: the deprecated sw_vers callback is still honored for older helpers. */
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers_legacy, &context);
#pragma clang diagnostic pop
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers failed with 0x%x\n", __func__, ret);
			return ret;
		}
	}

	if (current_core->kcc_cb.kcc_coredump_save_note_data != NULL) {
		ret = current_core->kcc_cb.kcc_coredump_save_note_data(context.core_refcon, coredump_save_note_data, &context);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data failed with 0x%x\n", __func__, ret);
			return ret;
		}
	}

	if (context.core_note_bytes_remaining != 0) {
		kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data returned without all note data written, %llu of %llu remaining\n",
		    __func__, context.core_note_bytes_remaining, context.core_note_bytes_total);
		return KERN_FAILURE;
	}


	/* Flush the last data out */
	ret = kdp_core_output(context.core_outvars, 0, NULL);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
		    context.core_outvars, ret);
		return ret;
	}

	kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.",
	    current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
	    (context.core_thread_count * context.core_thread_state_size), context.core_file_length);

#if CONFIG_CPU_COUNTERS
	end_cycles = mt_cur_cpu_cycles();
	kern_coredump_log(&context, "\nCore dump took %llu cycles\n", end_cycles - start_cycles);
#endif // CONFIG_CPU_COUNTERS

	if (core_begin_offset) {
		/* If we're writing to disk (we have a begin offset), we need to update the header */
		ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed, details_flags);
		if (ret != KERN_SUCCESS) {
			/* Failing here may leave the overall raw-file header inconsistent, so tell the caller to stop. */
			*abort_on_failure = TRUE;
			kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
			return ret;
		}
	}

	kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);

	*core_file_length = context.core_file_length_compressed;

	return KERN_SUCCESS;
}
1043 
1044 /*
1045  * Collect coprocessor and userspace coredumps
1046  */
1047 static kern_return_t
kern_do_auxiliary_coredump(void * core_outvars,struct kern_coredump_core * list,uint64_t * last_file_offset,uint64_t details_flags,boolean_t * abort_on_failure)1048 kern_do_auxiliary_coredump(void * core_outvars, struct kern_coredump_core * list, uint64_t * last_file_offset, uint64_t details_flags, boolean_t *abort_on_failure)
1049 {
1050 	struct kern_coredump_core *current_core = list;
1051 	uint64_t prev_core_length = 0;
1052 	kern_coredump_type_t type = current_core == kern_userspace_coredump_core_list ? USERSPACE_COREDUMP : COPROCESSOR_COREDUMP;
1053 	kern_return_t ret = KERN_SUCCESS;
1054 	kern_return_t cur_ret = KERN_SUCCESS;
1055 
1056 	if (type == USERSPACE_COREDUMP && kdp_lck_mtx_lock_spin_is_acquired(&kern_userspace_coredump_core_list_lock)) {
1057 		// Userspace coredump list was being modified at the time of the panic. Skip collecting userspace coredumps
1058 		kern_coredump_log(NULL, "Skipping userspace coredump, coredump list is locked\n");
1059 		return KERN_FAILURE;
1060 	}
1061 
1062 	while (current_core) {
1063 		/* Seek to the beginning of the next file */
1064 		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
1065 		if (cur_ret != KERN_SUCCESS) {
1066 			kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
1067 			return KERN_FAILURE;
1068 		}
1069 
1070 		cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, abort_on_failure, type, details_flags);
1071 		if (cur_ret != KERN_SUCCESS) {
1072 			// Fail early without trying remaing corefiles when requested.
1073 			if (*abort_on_failure) {
1074 				// The header may be in an inconsistent state, so bail now
1075 				return KERN_FAILURE;
1076 			} else {
1077 				// Try to capture other corefiles even if one failed, update the overall return
1078 				// status though
1079 				prev_core_length = 0;
1080 				ret = KERN_FAILURE;
1081 			}
1082 		}
1083 
1084 		/* Calculate the offset of the beginning of the next core in the raw file */
1085 		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1086 		prev_core_length = 0;
1087 		current_core = current_core->kcc_next;
1088 	}
1089 
1090 	return ret;
1091 }
1092 
1093 kern_return_t
kern_do_coredump(void * core_outvars,kern_coredump_flags_t flags,uint64_t first_file_offset,uint64_t * last_file_offset,uint64_t details_flags)1094 kern_do_coredump(void *core_outvars, kern_coredump_flags_t flags, uint64_t first_file_offset, uint64_t *last_file_offset, uint64_t details_flags)
1095 {
1096 	uint64_t prev_core_length = 0;
1097 	kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
1098 	boolean_t abort_dump = flags & KCF_ABORT_ON_FAILURE;
1099 
1100 	assert(last_file_offset != NULL);
1101 
1102 	*last_file_offset = first_file_offset;
1103 	cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &abort_dump, XNU_COREDUMP, details_flags);
1104 
1105 	if (cur_ret != KERN_SUCCESS) {
1106 		// As long as we didn't fail while updating the header for the raw file, we should be able to try
1107 		// to capture other corefiles.
1108 		if (abort_dump) {
1109 			// The header may be in an inconsistent state, so bail now
1110 			return KERN_FAILURE;
1111 		} else {
1112 			prev_core_length = 0;
1113 			ret = KERN_FAILURE;
1114 		}
1115 	}
1116 
1117 	*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1118 
1119 	if (flags & KCF_KERNEL_ONLY) {
1120 		return ret;
1121 	}
1122 
1123 	/* Dump secure kernel if allowed */
1124 	if (sk_helper) {
1125 		/* Seek to the beginning of next file. */
1126 		cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
1127 		if (cur_ret != KERN_SUCCESS) {
1128 			kern_coredump_log(NULL, "secure_core: Unable to seek to the start of file: %d\n", cur_ret);
1129 			return KERN_FAILURE;
1130 		}
1131 
1132 		/* Dump the secure core to disk. */
1133 		cur_ret = kern_coredump_routine(core_outvars, sk_helper, *last_file_offset, &prev_core_length, &abort_dump, SECURE_COREDUMP, details_flags);
1134 		if (cur_ret != KERN_SUCCESS) {
1135 			if (abort_dump) {
1136 				return KERN_FAILURE;
1137 			} else {
1138 				prev_core_length = 0;
1139 				ret = KERN_FAILURE;
1140 			}
1141 		}
1142 
1143 		*last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1144 	}
1145 
1146 	// Collect coprocessor coredumps first, in case userspace coredumps fail
1147 	ret = kern_do_auxiliary_coredump(core_outvars, kern_coredump_core_list, last_file_offset, details_flags, &abort_dump);
1148 	if (ret != KERN_SUCCESS) {
1149 		kern_coredump_log(NULL, "Failed to dump coprocessor cores\n");
1150 		return ret;
1151 	}
1152 
1153 	ret = kern_do_auxiliary_coredump(core_outvars, kern_userspace_coredump_core_list, last_file_offset, details_flags, &abort_dump);
1154 	if (ret != KERN_SUCCESS) {
1155 		kern_coredump_log(NULL, "Failed to dump userspace process cores\n");
1156 		return ret;
1157 	}
1158 
1159 	return KERN_SUCCESS;
1160 }
1161 #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1162 
1163 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)1164 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks, void* refcon,
1165     const char *core_description, boolean_t is64bit, uint32_t mh_magic,
1166     cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
1167 {
1168 #pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
1169 	return KERN_NOT_SUPPORTED;
1170 }
1171 
1172 kern_return_t
kern_register_sk_coredump_helper(__unused kern_coredump_callback_config * sk_callbacks,__unused void * refcon)1173 kern_register_sk_coredump_helper(__unused kern_coredump_callback_config *sk_callbacks, __unused void *refcon)
1174 {
1175 	return KERN_NOT_SUPPORTED;
1176 }
1177 
1178 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)1179 kern_register_userspace_coredump(task_t task, const char * name)
1180 {
1181 	(void)task;
1182 	(void)name;
1183 	return KERN_NOT_SUPPORTED;
1184 }
1185 
1186 kern_return_t
kern_unregister_userspace_coredump(task_t task)1187 kern_unregister_userspace_coredump(task_t task)
1188 {
1189 	(void)task;
1190 	return KERN_NOT_SUPPORTED;
1191 }
1192 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1193 
1194 /*
1195  * Must be callable with a NULL context
1196  */
void
kern_coredump_log(void *context, const char *string, ...)
{
#pragma unused(context)
	/*
	 * printf-style logging for the coredump path; context is accepted (and
	 * ignored) so callbacks can pass whatever handle they hold, including NULL.
	 */
	va_list coredump_log_args;

	va_start(coredump_log_args, string);
	/* NOTE(review): final arg is presumably _doprnt's default radix (16) — confirm against osfmk printf. */
	_doprnt(string, &coredump_log_args, consdebug_putc, 16);
	va_end(coredump_log_args);

#if defined(__arm64__)
	/* Push the formatted text out to the panic log immediately on arm64. */
	paniclog_flush();
#endif
}
1211