1 /*
2 * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kdp/kdp_core.h>
30 #include <kdp/processor_core.h>
31 #include <kern/assert.h>
32 #if MONOTONIC
33 #include <kern/monotonic.h>
34 #endif // MONOTONIC
35 #include <kern/zalloc.h>
36 #include <libkern/kernel_mach_header.h>
37 #include <libkern/OSAtomic.h>
38 #include <libsa/types.h>
39 #include <pexpert/pexpert.h>
40 #include <vm/vm_map.h>
41
42 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
43
/*
 * Round x up to the next multiple of y (y must be non-zero).
 * NOTE: evaluates both arguments more than once — do not pass
 * expressions with side effects.
 */
#define roundup(x, y) ((((x) % (y)) == 0) ? \
	        (x) : ((x) + ((y) - ((x) % (y)))))
46
#define DATA_OWNER_MAIN_BIN_SPEC "main bin spec"
/*
 * Format of the "main bin spec" LC_NOTE payload as expected by LLDB.
 * Layout is ABI: packed, and consumed by external tooling — do not reorder
 * or resize fields without coordinating with LLDB.
 */
typedef struct {
	uint32_t version;        // currently 1 (MAIN_BIN_SPEC_VERSION)
	uint32_t type;           // 0 == unspecified, 1 == kernel, 2 == user process, 3 == standalone (ie FW)
	uint64_t address;        // UINT64_MAX if address not specified
	uuid_t   uuid;           // all zeros if uuid not specified
	uint32_t log2_pagesize;  // process page size in log base 2, e.g. 4k pages are 12. 0 for unspecified
	uint32_t unused;         // leave set to 0
} __attribute__((packed)) main_bin_spec;
#define MAIN_BIN_SPEC_VERSION 1
#define MAIN_BIN_SPEC_TYPE_KERNEL 1
#define MAIN_BIN_SPEC_TYPE_USER 2
#define MAIN_BIN_SPEC_TYPE_STANDALONE 3
63
#define DATA_OWNER_LEGACY_BIN_SPEC "kern ver str"
/*
 * Format of the legacy bin spec (LC_IDENT-like) LC_NOTE payload as expected
 * by LLDB. Used for helpers that only provide the legacy version-string
 * callback rather than the structured "main bin spec" detail.
 */
typedef struct {
	uint32_t version; // currently 1 (LEGACY_BIN_SPEC_VERSION)
	char version_string[KERN_COREDUMP_VERSIONSTRINGMAXSIZE]; // NUL-terminated version string
} __attribute__((packed)) legacy_bin_spec;
#define LEGACY_BIN_SPEC_VERSION 1
73
/*
 * Kinds of cores that can be registered with this layer: the kernel itself,
 * userspace tasks, and co-processors (firmware).
 */
__enum_closed_decl(kern_coredump_type_t, uint8_t, {
	XNU_COREDUMP,
	USERSPACE_COREDUMP,
	COPROCESSOR_COREDUMP,
	NUM_COREDUMP_TYPES,
});

/* Maps a kern_coredump_type_t to the "main bin spec" LC_NOTE `type` value */
static uint32_t bin_spec_map[NUM_COREDUMP_TYPES] = {
	[XNU_COREDUMP] = MAIN_BIN_SPEC_TYPE_KERNEL,
	[USERSPACE_COREDUMP] = MAIN_BIN_SPEC_TYPE_USER,
	[COPROCESSOR_COREDUMP] = MAIN_BIN_SPEC_TYPE_STANDALONE,
};
86
87 /*
88 * The processor_core_context structure describes the current
89 * corefile that's being generated. It also includes a pointer
90 * to the core_outvars which is used by the KDP code for context
91 * about the specific output mechanism being used.
92 *
93 * We include *remaining variables to catch inconsistencies / bugs
94 * in the co-processor coredump callbacks.
95 */
96 typedef struct {
97 struct kdp_core_out_vars * core_outvars; /* Output procedure info (see kdp_out_stage.h) */
98 kern_coredump_callback_config *core_config; /* Information about core currently being dumped */
99 void *core_refcon; /* Reference constant associated with the coredump helper */
100 boolean_t core_should_be_skipped; /* Indicates whether this specific core should not be dumped */
101 boolean_t core_is64bit; /* Bitness of CPU */
102 kern_coredump_type_t core_type; /* Indicates type of this core*/
103 uint32_t core_mh_magic; /* Magic for mach header */
104 cpu_type_t core_cpu_type; /* CPU type for mach header */
105 cpu_subtype_t core_cpu_subtype; /* CPU subtype for mach header */
106 uint64_t core_file_length; /* Overall corefile length including any zero padding */
107 uint64_t core_file_length_compressed; /* File length after compression */
108 uint64_t core_segment_count; /* Number of LC_SEGMENTs in the core currently being dumped */
109 uint64_t core_segments_remaining; /* Number of LC_SEGMENTs that have not been added to the header */
110 uint64_t core_segment_byte_total; /* Sum of all the data from the LC_SEGMENTS in the core */
111 uint64_t core_segment_bytes_remaining; /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
112 uint64_t core_thread_count; /* Number of LC_THREADs to be included */
113 uint64_t core_threads_remaining; /* Number of LC_THREADs that have yet to be included */
114 uint64_t core_thread_state_size; /* Size of each LC_THREAD */
115 uint64_t core_note_count; /* Number of LC_NOTEs to be included */
116 uint64_t core_notes_remaining; /* Number of LC_NOTEs that have not been added to the header */
117 uint64_t core_note_bytes_total; /* Sum of all data from the LC_NOTE segments in the core */
118 uint64_t core_note_bytes_remaining; /* Quantity of data remaining from LC_NOTEs that have yet to be added */
119 uint64_t core_cur_hoffset; /* Current offset in this core's header */
120 uint64_t core_cur_foffset; /* Current offset in this core's overall file */
121 uint64_t core_header_size; /* Size of this core's header */
122 uint64_t core_total_bytes; /* Total amount of data to be included in this core (excluding zero fill) */
123 } processor_core_context;
124
125 /*
126 * The kern_coredump_core structure describes a core that has been
127 * registered for use by the coredump mechanism.
128 */
129 struct kern_coredump_core {
130 struct kern_coredump_core *kcc_next; /* Next processor to dump */
131 void *kcc_refcon; /* Reference constant to be passed to callbacks */
132 char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor */
133 boolean_t kcc_is64bit; /* Processor bitness */
134 uint32_t kcc_mh_magic; /* Magic for mach header */
135 cpu_type_t kcc_cpu_type; /* CPU type for mach header */
136 cpu_subtype_t kcc_cpu_subtype; /* CPU subtype for mach header */
137 kern_coredump_callback_config kcc_cb; /* Registered processor callbacks for coredump */
138 };
139
/* Co-processor helper list: appended via lock-free CAS, entries are never removed */
struct kern_coredump_core * kern_coredump_core_list = NULL;
/* Userspace helper list: protected by kern_userspace_coredump_core_list_lock */
struct kern_coredump_core * kern_userspace_coredump_core_list = NULL;
LCK_GRP_DECLARE(kern_userspace_coredump_core_list_lock_grp, "userspace coredump list");
LCK_MTX_DECLARE(kern_userspace_coredump_core_list_lock, &kern_userspace_coredump_core_list_lock_grp);

/* Local alias for the deprecated legacy software-version callback type */
typedef kern_return_t (*legacy_sw_vers_registered_cb)(void *refcon, core_save_sw_vers_cb callback, void *context);

/* Total number of registered helpers of all types (updated atomically) */
uint32_t coredump_registered_count = 0;

/* The single registered kernel (XNU) coredump helper */
struct kern_coredump_core *kernel_helper = NULL;
150
151 static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,kern_coredump_type_t type,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)152 kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
153 void *refcon, const char *core_description, kern_coredump_type_t type, boolean_t is64bit,
154 uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
155 {
156 struct kern_coredump_core *core_helper = NULL;
157 kern_coredump_callback_config *core_callbacks = NULL;
158
159 if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) {
160 return NULL;
161 }
162 if (kc_callbacks == NULL) {
163 return NULL;
164 }
165 ;
166 if (core_description == NULL) {
167 return NULL;
168 }
169
170 if (kc_callbacks->kcc_coredump_get_summary == NULL ||
171 kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
172 kc_callbacks->kcc_coredump_save_segment_data == NULL ||
173 kc_callbacks->kcc_coredump_save_thread_state == NULL) {
174 return NULL;
175 }
176
177 #pragma clang diagnostic push
178 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
179 legacy_sw_vers_registered_cb legacy_vers_callback = kc_callbacks->kcc_coredump_save_sw_vers;
180 #pragma clang diagnostic pop
181
182 if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
183 if (legacy_vers_callback == NULL &&
184 kc_callbacks->kcc_coredump_save_sw_vers_detail == NULL) {
185 return NULL;
186 }
187 } else {
188 if (legacy_vers_callback == NULL) {
189 return NULL;
190 }
191 }
192
193
194 if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
195 /* Either all note related callbacks should be set or none should be set */
196 if ((kc_callbacks->kcc_coredump_save_note_summary == NULL) != (kc_callbacks->kcc_coredump_save_note_descriptions == NULL)) {
197 return NULL;
198 }
199 if ((kc_callbacks->kcc_coredump_save_note_descriptions == NULL) != (kc_callbacks->kcc_coredump_save_note_data == NULL)) {
200 return NULL;
201 }
202 }
203
204
205 #if !defined(__LP64__)
206 /* We don't support generating 64-bit cores on 32-bit platforms */
207 if (is64bit) {
208 return NULL;
209 }
210 #endif
211
212 core_helper = zalloc_permanent_type(struct kern_coredump_core);
213 core_helper->kcc_next = NULL;
214 core_helper->kcc_refcon = refcon;
215 if (type == XNU_COREDUMP || type == USERSPACE_COREDUMP) {
216 snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
217 } else {
218 assert(type == COPROCESSOR_COREDUMP);
219 /* Make sure there's room for the -coproc suffix (16 - NULL char - strlen(-coproc)) */
220 snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.8s-coproc", core_description);
221 }
222 core_helper->kcc_is64bit = is64bit;
223 core_helper->kcc_mh_magic = mh_magic;
224 core_helper->kcc_cpu_type = cpu_type;
225 core_helper->kcc_cpu_subtype = cpu_subtype;
226 core_callbacks = &core_helper->kcc_cb;
227
228 core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
229 core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
230 core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
231 core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
232 core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
233 #pragma clang diagnostic push
234 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
235 core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;
236 #pragma clang diagnostic pop
237
238
239 if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
240 core_callbacks->kcc_coredump_save_note_summary = kc_callbacks->kcc_coredump_save_note_summary;
241 core_callbacks->kcc_coredump_save_note_descriptions = kc_callbacks->kcc_coredump_save_note_descriptions;
242 core_callbacks->kcc_coredump_save_note_data = kc_callbacks->kcc_coredump_save_note_data;
243 core_callbacks->kcc_coredump_save_sw_vers_detail = kc_callbacks->kcc_coredump_save_sw_vers_detail;
244 }
245
246 if (type == XNU_COREDUMP) {
247 assert(kernel_helper == NULL);
248 kernel_helper = core_helper;
249 } else if (type == USERSPACE_COREDUMP) {
250 lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
251 core_helper->kcc_next = kern_userspace_coredump_core_list;
252 kern_userspace_coredump_core_list = core_helper;
253 lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
254 } else {
255 assert(type == COPROCESSOR_COREDUMP);
256 do {
257 core_helper->kcc_next = kern_coredump_core_list;
258 } while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
259 }
260
261 OSAddAtomic(1, &coredump_registered_count);
262 kprintf("Registered coredump handler for %s\n", core_description);
263
264 return core_helper;
265 }
266
267 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)268 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
269 void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
270 cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
271 {
272 if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) {
273 return KERN_RESOURCE_SHORTAGE;
274 }
275
276 if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, COPROCESSOR_COREDUMP,
277 is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) {
278 return KERN_INVALID_ARGUMENT;
279 }
280
281 return KERN_SUCCESS;
282 }
283
284 kern_return_t
kern_register_xnu_coredump_helper(kern_coredump_callback_config * kc_callbacks)285 kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
286 {
287 #if defined(__LP64__)
288 boolean_t is64bit = TRUE;
289 #else
290 boolean_t is64bit = FALSE;
291 #endif
292
293 if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", XNU_COREDUMP, is64bit,
294 _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
295 return KERN_FAILURE;
296 }
297
298 return KERN_SUCCESS;
299 }
300
/* Report the CPU type for a user process (defined in the BSD layer) */
extern cpu_type_t
process_cpu_type(void * bsd_info);

/*
 * NOTE(review): declared as returning cpu_type_t rather than cpu_subtype_t —
 * presumably this matches the definition in the BSD layer; confirm against
 * the defining translation unit before changing.
 */
extern cpu_type_t
process_cpu_subtype(void * bsd_info);

/* Returns a pointer to the process name (defined in the BSD layer) */
extern char *proc_name_address(void *p);
308
309 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)310 kern_register_userspace_coredump(task_t task, const char * name)
311 {
312 kern_return_t result;
313 struct kern_userspace_coredump_context * context = NULL;
314 boolean_t is64bit;
315 uint32_t mh_magic;
316 uint32_t mh_cputype;
317 uint32_t mh_cpusubtype;
318 kern_coredump_callback_config userkc_callbacks;
319
320 is64bit = task_has_64Bit_addr(task);
321 mh_magic = is64bit ? MH_MAGIC_64 : MH_MAGIC;
322 mh_cputype = process_cpu_type(get_bsdtask_info(task));
323 mh_cpusubtype = process_cpu_subtype(get_bsdtask_info(task));
324
325
326 context = kalloc_type(struct kern_userspace_coredump_context, (zalloc_flags_t)(Z_WAITOK | Z_ZERO));
327 context->task = task;
328
329 userkc_callbacks.kcc_coredump_init = user_dump_init;
330 userkc_callbacks.kcc_coredump_get_summary = user_dump_save_summary;
331 userkc_callbacks.kcc_coredump_save_segment_descriptions = user_dump_save_seg_descriptions;
332 userkc_callbacks.kcc_coredump_save_thread_state = user_dump_save_thread_state;
333 userkc_callbacks.kcc_coredump_save_sw_vers_detail = user_dump_save_sw_vers_detail;
334 userkc_callbacks.kcc_coredump_save_segment_data = user_dump_save_segment_data;
335 userkc_callbacks.kcc_coredump_save_note_summary = user_dump_save_note_summary;
336 userkc_callbacks.kcc_coredump_save_note_descriptions = user_dump_save_note_descriptions;
337 userkc_callbacks.kcc_coredump_save_note_data = user_dump_save_note_data;
338
339 if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, &userkc_callbacks, context, name, USERSPACE_COREDUMP, is64bit,
340 mh_magic, mh_cputype, mh_cpusubtype) == NULL) {
341 result = KERN_FAILURE;
342 goto finish;
343 }
344
345 result = KERN_SUCCESS;
346
347 finish:
348 if (result != KERN_SUCCESS && context != NULL) {
349 kfree_type(struct kern_userspace_coredump_context, context);
350 }
351
352 return result;
353 }
354
355 kern_return_t
kern_unregister_userspace_coredump(task_t task)356 kern_unregister_userspace_coredump(task_t task)
357 {
358 struct kern_coredump_core * current_core = NULL;
359 struct kern_coredump_core * previous_core = NULL;
360
361 lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
362 current_core = kern_userspace_coredump_core_list;
363 while (current_core) {
364 struct kern_userspace_coredump_context * context = (struct kern_userspace_coredump_context *)current_core->kcc_refcon;
365 assert(context != NULL);
366 if (context->task == task) {
367 /* remove current_core from the list */
368 if (previous_core == NULL) {
369 kern_userspace_coredump_core_list = current_core->kcc_next;
370 } else {
371 previous_core->kcc_next = current_core->kcc_next;
372 }
373 break;
374 }
375 previous_core = current_core;
376 current_core = current_core->kcc_next;
377 }
378 lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
379
380 if (current_core) {
381 kfree_type(struct kern_userspace_coredump_context, current_core->kcc_refcon);
382 OSAddAtomic(-1, &coredump_registered_count);
383 return KERN_SUCCESS;
384 }
385
386 return KERN_NOT_FOUND;
387 }
388
389 /*
390 * Save LC_NOTE metadata about the core we are going to write before we write the mach header
391 */
392 static int
coredump_save_note_summary(uint64_t core_note_count,uint64_t core_note_byte_count,void * context)393 coredump_save_note_summary(uint64_t core_note_count, uint64_t core_note_byte_count, void *context)
394 {
395 processor_core_context *core_context = (processor_core_context *)context;
396
397 if (!core_note_count || !core_note_byte_count || !context) {
398 return KERN_INVALID_ARGUMENT;
399 }
400
401 core_context->core_note_count = core_context->core_notes_remaining = core_note_count;
402 core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = core_note_byte_count;
403
404 return KERN_SUCCESS;
405 }
406
407 /*
408 * Save metadata about the core we're about to write, write out the mach header
409 */
410 static int
coredump_save_summary(uint64_t core_segment_count,uint64_t core_byte_count,uint64_t thread_count,uint64_t thread_state_size,__unused uint64_t misc_bytes_count,void * context)411 coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
412 uint64_t thread_count, uint64_t thread_state_size,
413 __unused uint64_t misc_bytes_count, void *context)
414 {
415 processor_core_context *core_context = (processor_core_context *)context;
416 uint32_t sizeofcmds = 0, numcmds = 0;
417 bool should_skip = false;
418 int ret = 0;
419
420 if (!core_segment_count || !core_byte_count || !thread_count || !thread_state_size
421 || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) {
422 return KERN_INVALID_ARGUMENT;
423 }
424
425 /* Initialize core_context */
426 core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
427 core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
428 core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
429 core_context->core_thread_state_size = thread_state_size;
430
431 /* Account for the LC_NOTE needed to store version/load information */
432 core_context->core_note_count = core_context->core_notes_remaining = (core_context->core_note_count + 1);
433 size_t vers_note_length = sizeof(main_bin_spec);
434 if (core_context->core_config->kcc_coredump_save_sw_vers_detail == NULL) {
435 vers_note_length = sizeof(legacy_bin_spec);
436 }
437 core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = (core_context->core_note_bytes_total + vers_note_length);
438
439 #if defined(__LP64__)
440 if (core_context->core_is64bit) {
441 sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
442 (core_context->core_threads_remaining * core_context->core_thread_state_size) +
443 (core_context->core_note_count * sizeof(struct note_command)));
444 core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
445 } else
446 #endif /* defined(__LP64__) */
447 {
448 sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
449 (core_context->core_threads_remaining * core_context->core_thread_state_size) +
450 (core_context->core_note_count * sizeof(struct note_command)));
451 core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
452 }
453
454
455 core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
456 core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
457 core_context->core_cur_foffset = round_page(core_context->core_header_size);
458
459 numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + core_context->core_note_count);
460
461 /*
462 * Reset the zstream and other output context before writing any data out. We do this here
463 * to update the total file length on the outvars before we start writing out.
464 */
465 ret = kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length, true, &should_skip);
466 if (ret != KERN_SUCCESS) {
467 kern_coredump_log(context, "%s() : failed to reset the out vars : kdp_reset_output_vars(%p, %llu, true, %p) returned error 0x%x\n",
468 __func__, core_context->core_outvars, core_context->core_file_length, &should_skip, ret);
469 return ret;
470 }
471
472 if (should_skip) {
473 core_context->core_should_be_skipped = TRUE;
474 return KERN_SUCCESS;
475 }
476
477 /* Construct core file header */
478 #if defined(__LP64__)
479 if (core_context->core_is64bit) {
480 struct mach_header_64 core_header = { };
481
482 core_header.magic = core_context->core_mh_magic;
483 core_header.cputype = core_context->core_cpu_type;
484 core_header.cpusubtype = core_context->core_cpu_subtype;
485 core_header.filetype = MH_CORE;
486 core_header.ncmds = numcmds;
487 core_header.sizeofcmds = sizeofcmds;
488 core_header.flags = 0;
489
490 /* Send the core_header to the output procedure */
491 ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
492 if (ret != KERN_SUCCESS) {
493 kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
494 __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
495 return ret;
496 }
497
498 core_context->core_cur_hoffset += sizeof(core_header);
499 } else
500 #endif /* defined(__LP64__) */
501 {
502 struct mach_header core_header = { };
503
504 core_header.magic = core_context->core_mh_magic;
505 core_header.cputype = core_context->core_cpu_type;
506 core_header.cpusubtype = core_context->core_cpu_subtype;
507 core_header.filetype = MH_CORE;
508 core_header.ncmds = numcmds;
509 core_header.sizeofcmds = sizeofcmds;
510 core_header.flags = 0;
511
512 /* Send the core_header to the output procedure */
513 ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
514 if (ret != KERN_SUCCESS) {
515 kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
516 __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
517 return ret;
518 }
519
520 core_context->core_cur_hoffset += sizeof(core_header);
521 }
522
523 return KERN_SUCCESS;
524 }
525
526 /*
527 * Construct a segment command for the specified segment.
528 */
529 static int
coredump_save_segment_descriptions(uint64_t seg_start,uint64_t seg_end,void * context)530 coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
531 void *context)
532 {
533 processor_core_context *core_context = (processor_core_context *)context;
534 int ret;
535 uint64_t size = seg_end - seg_start;
536
537 if (seg_end <= seg_start) {
538 kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
539 __func__, seg_start, seg_end, context, seg_start, seg_end);
540 return KERN_INVALID_ARGUMENT;
541 }
542
543 if (core_context->core_segments_remaining == 0) {
544 kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n",
545 __func__, seg_start, seg_end, context, core_context->core_segment_count);
546 return KERN_INVALID_ARGUMENT;
547 }
548
549 /* Construct segment command */
550 #if defined(__LP64__)
551 if (core_context->core_is64bit) {
552 struct segment_command_64 seg_command = { };
553
554 if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
555 kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
556 __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
557 return KERN_NO_SPACE;
558 }
559
560 seg_command.cmd = LC_SEGMENT_64;
561 seg_command.cmdsize = sizeof(seg_command);
562 seg_command.segname[0] = 0;
563 seg_command.vmaddr = seg_start;
564 seg_command.vmsize = size;
565 seg_command.fileoff = core_context->core_cur_foffset;
566 seg_command.filesize = size;
567 seg_command.maxprot = VM_PROT_READ;
568 seg_command.initprot = VM_PROT_READ;
569
570 /* Flush new command to output */
571 ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
572 if (ret != KERN_SUCCESS) {
573 kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. kdp_core_output(%p, %lu, %p) returned error %d\n",
574 __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
575 core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
576 return ret;
577 }
578
579 core_context->core_cur_hoffset += sizeof(seg_command);
580 } else
581 #endif /* defined(__LP64__) */
582 {
583 struct segment_command seg_command = { };
584
585 if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
586 kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
587 __func__, seg_start, seg_end, context, seg_start, seg_end);
588 return KERN_INVALID_ARGUMENT;
589 }
590
591 if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
592 kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
593 __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
594 return KERN_NO_SPACE;
595 }
596
597 seg_command.cmd = LC_SEGMENT;
598 seg_command.cmdsize = sizeof(seg_command);
599 seg_command.segname[0] = 0;
600 seg_command.vmaddr = (uint32_t) seg_start;
601 seg_command.vmsize = (uint32_t) size;
602 seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
603 seg_command.filesize = (uint32_t) size;
604 seg_command.maxprot = VM_PROT_READ;
605 seg_command.initprot = VM_PROT_READ;
606
607 /* Flush new command to output */
608 ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
609 if (ret != KERN_SUCCESS) {
610 kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
611 __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
612 core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
613 return ret;
614 }
615
616 core_context->core_cur_hoffset += sizeof(seg_command);
617 }
618
619 /* Update coredump context */
620 core_context->core_segments_remaining--;
621 core_context->core_cur_foffset += size;
622
623 return KERN_SUCCESS;
624 }
625
626 /*
627 * Construct a LC_NOTE command for the specified note
628 */
629 static int
coredump_save_note_description(const char * data_owner,uint64_t length,void * context)630 coredump_save_note_description(const char * data_owner, uint64_t length, void *context)
631 {
632 processor_core_context *core_context = (processor_core_context *)context;
633 int ret;
634
635 if (data_owner == NULL || (strlen(data_owner) == 0)) {
636 kern_coredump_log(context, "%s() called with invalid data_owner\n", __func__);
637 return KERN_INVALID_ARGUMENT;
638 }
639
640 if (core_context->core_notes_remaining == 0) {
641 kern_coredump_log(context, "%s() called too many times, %llu note descriptions already recorded\n",
642 __func__, core_context->core_note_count);
643 return KERN_INVALID_ARGUMENT;
644 }
645
646 struct note_command note = { .cmd = LC_NOTE,
647 .cmdsize = sizeof(struct note_command),
648 .offset = core_context->core_cur_foffset,
649 .size = length, };
650 strlcpy((char *) ¬e.data_owner, data_owner, sizeof(note.data_owner));
651
652 /* Flush new command to output */
653 ret = kdp_core_output(core_context->core_outvars, sizeof(note), (caddr_t)¬e);
654 if (ret != KERN_SUCCESS) {
655 kern_coredump_log(context, "%s() : failed to write note %llu of %llu : kdp_core_output() returned error 0x%x\n",
656 __func__, core_context->core_note_count - core_context->core_notes_remaining,
657 core_context->core_note_count, ret);
658 return ret;
659 }
660
661 /* Update coredump context */
662 core_context->core_cur_foffset += length;
663 core_context->core_cur_hoffset += sizeof(note);
664 core_context->core_notes_remaining--;
665
666 return KERN_SUCCESS;
667 }
668
669 /*
670 * Save thread state.
671 *
672 * Passed thread_state is expected to be a struct thread_command
673 */
674 static int
coredump_save_thread_state(void * thread_state,void * context)675 coredump_save_thread_state(void *thread_state, void *context)
676 {
677 processor_core_context *core_context = (processor_core_context *)context;
678 struct thread_command *tc = (struct thread_command *)thread_state;
679 int ret;
680
681 if (tc->cmd != LC_THREAD) {
682 kern_coredump_log(context, "%s() : found %d expected LC_THREAD (%d)\n", __func__, tc->cmd, LC_THREAD);
683 return KERN_INVALID_ARGUMENT;
684 }
685
686 if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
687 kern_coredump_log(context, "%s() : ran out of space to save threads with %llu of %llu remaining\n", __func__,
688 core_context->core_threads_remaining, core_context->core_thread_count);
689 return KERN_NO_SPACE;
690 }
691
692 ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
693 if (ret != KERN_SUCCESS) {
694 kern_coredump_log(context, "%s() : failed to write thread data : kdp_core_output() returned 0x%x\n", __func__, ret);
695 return ret;
696 }
697
698 core_context->core_threads_remaining--;
699 core_context->core_cur_hoffset += core_context->core_thread_state_size;
700
701 return KERN_SUCCESS;
702 }
703
704 static int
coredump_save_segment_data(void * seg_data,uint64_t length,void * context)705 coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
706 {
707 int ret;
708 processor_core_context *core_context = (processor_core_context *)context;
709
710 if (length > core_context->core_segment_bytes_remaining) {
711 kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
712 seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
713 core_context->core_segment_bytes_remaining);
714 return KERN_INVALID_ARGUMENT;
715 }
716
717 ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
718 if (ret != KERN_SUCCESS) {
719 kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
720 core_context->core_segment_bytes_remaining, ret);
721 return ret;
722 }
723
724 core_context->core_segment_bytes_remaining -= length;
725 core_context->core_cur_foffset += length;
726
727 return KERN_SUCCESS;
728 }
729
730 static int
coredump_save_note_data(void * note_data,uint64_t length,void * context)731 coredump_save_note_data(void *note_data, uint64_t length, void *context)
732 {
733 int ret;
734 processor_core_context *core_context = (processor_core_context *)context;
735
736 if (length > core_context->core_note_bytes_remaining) {
737 kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
738 note_data, length, context, core_context->core_note_bytes_total - core_context->core_note_bytes_remaining,
739 core_context->core_note_bytes_remaining);
740 return KERN_INVALID_ARGUMENT;
741 }
742
743 ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)note_data);
744 if (ret != KERN_SUCCESS) {
745 kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
746 core_context->core_note_bytes_remaining, ret);
747 return ret;
748 }
749
750 core_context->core_note_bytes_remaining -= length;
751 core_context->core_cur_foffset += length;
752
753 return KERN_SUCCESS;
754 }
755
756 static int
coredump_save_sw_vers_legacy(void * sw_vers,uint64_t length,void * context)757 coredump_save_sw_vers_legacy(void *sw_vers, uint64_t length, void *context)
758 {
759 processor_core_context *core_context = (processor_core_context *)context;
760 int ret;
761
762 if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
763 kern_coredump_log(context, "%s(%p, %llu, %p) : called with invalid length %llu\n", __func__,
764 sw_vers, length, context, length);
765 return KERN_INVALID_ARGUMENT;
766 }
767
768 uint32_t version = LEGACY_BIN_SPEC_VERSION;
769 ret = coredump_save_note_data(&version, sizeof(version), context);
770 if (ret != KERN_SUCCESS) {
771 kern_coredump_log(context, "%s() : failed to write legacy bin spec version : coredump_save_note_data() returned 0x%x\n",
772 __func__, ret);
773 return ret;
774 }
775
776 ret = coredump_save_note_data(sw_vers, length, context);
777 if (ret != KERN_SUCCESS) {
778 kern_coredump_log(context, "%s() : failed to write sw_vers string : coredump_save_note_data() returned 0x%x\n",
779 __func__, ret);
780 return ret;
781 }
782
783 if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
784 /* Zero fill to the full size */
785 uint64_t length_to_zero = (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length);
786 ret = kdp_core_output(core_context->core_outvars, length_to_zero, NULL);
787 if (ret != KERN_SUCCESS) {
788 kern_coredump_log(context, "%s() : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
789 __func__, core_context->core_outvars, length_to_zero, ret);
790 return ret;
791 }
792
793 core_context->core_note_bytes_remaining -= length_to_zero;
794 core_context->core_cur_foffset += length_to_zero;
795 }
796
797 return KERN_SUCCESS;
798 }
799
800 static int
coredump_save_sw_vers(uint64_t address,uuid_t uuid,uint32_t log2_pagesize,void * context)801 coredump_save_sw_vers(uint64_t address, uuid_t uuid, uint32_t log2_pagesize, void *context)
802 {
803 processor_core_context *core_context = (processor_core_context *)context;
804 int ret;
805
806 uint32_t type = bin_spec_map[core_context->core_type];
807 main_bin_spec spec = { .version = MAIN_BIN_SPEC_VERSION,
808 .type = type,
809 .address = address,
810 .log2_pagesize = log2_pagesize, };
811 uuid_copy(*((uuid_t *)&spec.uuid), uuid);
812
813 ret = coredump_save_note_data(&spec, sizeof(spec), context);
814 if (ret != KERN_SUCCESS) {
815 kern_coredump_log(context, "%s() : failed to write main bin spec structure : coredump_save_note_data() returned 0x%x\n", __func__, ret);
816 return ret;
817 }
818
819 return KERN_SUCCESS;
820 }
821
822 static kern_return_t
kern_coredump_routine(void * core_outvars,struct kern_coredump_core * current_core,uint64_t core_begin_offset,uint64_t * core_file_length,boolean_t * header_update_failed,kern_coredump_type_t type,uint64_t details_flags)823 kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *header_update_failed, kern_coredump_type_t type, uint64_t details_flags)
824 {
825 #if MONOTONIC
826 uint64_t start_cycles;
827 uint64_t end_cycles;
828 #endif // MONOTONIC
829 kern_return_t ret;
830 processor_core_context context = { };
831 *core_file_length = 0;
832 *header_update_failed = FALSE;
833
834 #if MONOTONIC
835 start_cycles = mt_cur_cpu_cycles();
836 #endif // MONOTONIC
837
838 /* Setup the coredump context */
839 context.core_outvars = core_outvars;
840 context.core_config = ¤t_core->kcc_cb;
841 context.core_refcon = current_core->kcc_refcon;
842 context.core_is64bit = current_core->kcc_is64bit;
843 context.core_mh_magic = current_core->kcc_mh_magic;
844 context.core_cpu_type = current_core->kcc_cpu_type;
845 context.core_cpu_subtype = current_core->kcc_cpu_subtype;
846 context.core_type = type;
847
848 kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);
849
850 if (current_core->kcc_cb.kcc_coredump_init != NULL) {
851 ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
852 if (ret == KERN_NODE_DOWN) {
853 kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
854 return KERN_SUCCESS;
855 } else if (ret != KERN_SUCCESS) {
856 kern_coredump_log(&context, "(%s) : coredump_init failed with %d\n", __func__, ret);
857 return ret;
858 }
859 }
860
861 /* Retrieve information about LC_NOTE data we will write out as part of the core before we populate the general header */
862 if (current_core->kcc_cb.kcc_coredump_save_note_summary != NULL) {
863 ret = current_core->kcc_cb.kcc_coredump_save_note_summary(context.core_refcon, coredump_save_note_summary, &context);
864 if (ret != KERN_SUCCESS) {
865 kern_coredump_log(&context, "(%s) : save_note_note_summary failed with %d\n", __func__, ret);
866 return ret;
867 }
868 }
869
870 /* Populate the context with metadata about the corefile (cmd info, sizes etc) */
871 ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
872 if (ret != KERN_SUCCESS) {
873 kern_coredump_log(&context, "(%s) : get_summary failed with %d\n", __func__, ret);
874 return ret;
875 }
876
877 if (context.core_should_be_skipped) {
878 kern_coredump_log(&context, "Skipping coredump\n");
879 return KERN_SUCCESS;
880 }
881
882 if (context.core_header_size == 0) {
883 kern_coredump_log(&context, "(%s) : header size not populated after coredump_get_summary\n", __func__);
884 return KERN_FAILURE;
885 }
886
887 /* Save the segment descriptions for the segments to be included */
888 ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
889 &context);
890 if (ret != KERN_SUCCESS) {
891 kern_coredump_log(&context, "(%s) : save_segment_descriptions failed with %d\n", __func__, ret);
892 return ret;
893 }
894
895 if (context.core_segments_remaining != 0) {
896 kern_coredump_log(&context, "(%s) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
897 __func__, context.core_segments_remaining, context.core_segment_count);
898 return KERN_FAILURE;
899 }
900
901 /* write out the LC_NOTE with the binary info */
902 if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
903 ret = coredump_save_note_description(DATA_OWNER_MAIN_BIN_SPEC, sizeof(main_bin_spec), &context);
904 } else {
905 ret = coredump_save_note_description(DATA_OWNER_LEGACY_BIN_SPEC, sizeof(legacy_bin_spec), &context);
906 }
907 if (ret != KERN_SUCCESS) {
908 kern_coredump_log(&context, "(%s) : coredump_save_note_description returned %d while writing binary info LC_NOTE description", __func__, ret);
909 return ret;
910 }
911
912 /* Save LC_NOTE desciptions for any additional notes to be included */
913 if (current_core->kcc_cb.kcc_coredump_save_note_descriptions != NULL) {
914 ret = current_core->kcc_cb.kcc_coredump_save_note_descriptions(context.core_refcon, coredump_save_note_description, &context);
915 if (ret != KERN_SUCCESS) {
916 kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_descriptions failed with %d\n", __func__, ret);
917 return ret;
918 }
919 }
920
921 if (context.core_notes_remaining != 0) {
922 kern_coredump_log(&context, "(%s) : save_note_descriptions returned without all note descriptions written, %llu of %llu remaining\n",
923 __func__, context.core_notes_remaining, context.core_note_count);
924 return KERN_FAILURE;
925 }
926
927 /*
928 * Save the thread commands/state
929 *
930 * TODO: Should this buffer be allocated at boot rather than on the stack?
931 */
932 if (context.core_thread_state_size) {
933 char threadstatebuf[context.core_thread_state_size];
934 ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
935 &context);
936 if (ret != KERN_SUCCESS) {
937 kern_coredump_log(&context, "(%s) : save_thread_state failed with %d\n", __func__, ret);
938 return ret;
939 }
940 }
941
942 if (context.core_threads_remaining != 0) {
943 kern_coredump_log(&context, "(%s) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
944 __func__, context.core_threads_remaining, context.core_thread_count);
945 return KERN_FAILURE;
946 }
947 assert(context.core_cur_hoffset == context.core_header_size);
948
949 /* Zero fill between the end of the header and the beginning of the segment data file offset */
950 ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
951 if (ret != KERN_SUCCESS) {
952 kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
953 context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
954 return ret;
955 }
956
957 /* Reset our local current file offset before we start writing out segment data */
958 context.core_cur_foffset = round_page(context.core_header_size);
959
960 ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
961 if (ret != KERN_SUCCESS) {
962 kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
963 return ret;
964 }
965
966 if (context.core_segment_bytes_remaining != 0) {
967 kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
968 context.core_segment_bytes_remaining, context.core_segment_byte_total);
969 return KERN_FAILURE;
970 }
971
972 /* Save out the LC_NOTE segment data, starting with the binary info / sw vers one */
973 if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
974 ret = current_core->kcc_cb.kcc_coredump_save_sw_vers_detail(context.core_refcon, coredump_save_sw_vers, &context);
975 if (ret != KERN_SUCCESS) {
976 kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers_detail_cb failed with 0x%x\n", __func__, ret);
977 return ret;
978 }
979 } else {
980 #pragma clang diagnostic push
981 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
982 ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers_legacy, &context);
983 #pragma clang diagnostic pop
984 if (ret != KERN_SUCCESS) {
985 kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers failed with 0x%x\n", __func__, ret);
986 return ret;
987 }
988 }
989
990 if (current_core->kcc_cb.kcc_coredump_save_note_data != NULL) {
991 ret = current_core->kcc_cb.kcc_coredump_save_note_data(context.core_refcon, coredump_save_note_data, &context);
992 if (ret != KERN_SUCCESS) {
993 kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data failed with 0x%x\n", __func__, ret);
994 return ret;
995 }
996 }
997
998 if (context.core_note_bytes_remaining != 0) {
999 kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data returned without all note data written, %llu of %llu remaining\n",
1000 __func__, context.core_note_bytes_remaining, context.core_note_bytes_total);
1001 return KERN_FAILURE;
1002 }
1003
1004
1005 /* Flush the last data out */
1006 ret = kdp_core_output(context.core_outvars, 0, NULL);
1007 if (ret != KERN_SUCCESS) {
1008 kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
1009 context.core_outvars, ret);
1010 return ret;
1011 }
1012
1013 kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.",
1014 current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
1015 (context.core_thread_count * context.core_thread_state_size), context.core_file_length);
1016
1017 #if MONOTONIC
1018 end_cycles = mt_cur_cpu_cycles();
1019 kern_coredump_log(&context, "\nCore dump took %llu cycles\n", end_cycles - start_cycles);
1020 #endif // MONOTONIC
1021
1022 if (core_begin_offset) {
1023 /* If we're writing to disk (we have a begin offset), we need to update the header */
1024 ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed, details_flags);
1025 if (ret != KERN_SUCCESS) {
1026 *header_update_failed = TRUE;
1027 kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
1028 return ret;
1029 }
1030 }
1031
1032 kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);
1033
1034 *core_file_length = context.core_file_length_compressed;
1035
1036 return KERN_SUCCESS;
1037 }
1038
1039 /*
1040 * Collect coprocessor and userspace coredumps
1041 */
1042 static kern_return_t
kern_do_auxiliary_coredump(void * core_outvars,struct kern_coredump_core * list,uint64_t * last_file_offset,uint64_t details_flags)1043 kern_do_auxiliary_coredump(void * core_outvars, struct kern_coredump_core * list, uint64_t * last_file_offset, uint64_t details_flags)
1044 {
1045 struct kern_coredump_core *current_core = list;
1046 uint64_t prev_core_length = 0;
1047 boolean_t header_update_failed = FALSE;
1048 kern_coredump_type_t type = current_core == kern_userspace_coredump_core_list ? USERSPACE_COREDUMP : COPROCESSOR_COREDUMP;
1049 kern_return_t ret = KERN_SUCCESS;
1050 kern_return_t cur_ret = KERN_SUCCESS;
1051
1052 if (type == USERSPACE_COREDUMP && kdp_lck_mtx_lock_spin_is_acquired(&kern_userspace_coredump_core_list_lock)) {
1053 // Userspace coredump list was being modified at the time of the panic. Skip collecting userspace coredumps
1054 kern_coredump_log(NULL, "Skipping userspace coredump, coredump list is locked\n");
1055 return KERN_FAILURE;
1056 }
1057
1058 while (current_core) {
1059 /* Seek to the beginning of the next file */
1060 cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
1061 if (cur_ret != KERN_SUCCESS) {
1062 kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
1063 return KERN_FAILURE;
1064 }
1065
1066 cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed, type, details_flags);
1067 if (cur_ret != KERN_SUCCESS) {
1068 // As long as we didn't fail while updating the header for the raw file, we should be able to try
1069 // to capture other corefiles.
1070 if (header_update_failed) {
1071 // The header may be in an inconsistent state, so bail now
1072 return KERN_FAILURE;
1073 } else {
1074 // Try to capture other corefiles even if one failed, update the overall return
1075 // status though
1076 prev_core_length = 0;
1077 ret = KERN_FAILURE;
1078 }
1079 }
1080
1081 /* Calculate the offset of the beginning of the next core in the raw file */
1082 *last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1083 prev_core_length = 0;
1084 current_core = current_core->kcc_next;
1085 }
1086
1087 return ret;
1088 }
1089
1090 kern_return_t
kern_do_coredump(void * core_outvars,boolean_t kernel_only,uint64_t first_file_offset,uint64_t * last_file_offset,uint64_t details_flags)1091 kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_offset, uint64_t *last_file_offset, uint64_t details_flags)
1092 {
1093 uint64_t prev_core_length = 0;
1094 kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
1095 boolean_t header_update_failed = FALSE;
1096
1097 assert(last_file_offset != NULL);
1098
1099 *last_file_offset = first_file_offset;
1100 cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &header_update_failed, XNU_COREDUMP, details_flags);
1101 if (cur_ret != KERN_SUCCESS) {
1102 // As long as we didn't fail while updating the header for the raw file, we should be able to try
1103 // to capture other corefiles.
1104 if (header_update_failed) {
1105 // The header may be in an inconsistent state, so bail now
1106 return KERN_FAILURE;
1107 } else {
1108 prev_core_length = 0;
1109 ret = KERN_FAILURE;
1110 }
1111 }
1112
1113 *last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1114
1115 if (kernel_only) {
1116 return ret;
1117 }
1118
1119 // Collect coprocessor coredumps first, in case userspace coredumps fail
1120 ret = kern_do_auxiliary_coredump(core_outvars, kern_coredump_core_list, last_file_offset, details_flags);
1121 if (ret != KERN_SUCCESS) {
1122 kern_coredump_log(NULL, "Failed to dump coprocessor cores\n");
1123 return ret;
1124 }
1125
1126 ret = kern_do_auxiliary_coredump(core_outvars, kern_userspace_coredump_core_list, last_file_offset, details_flags);
1127 if (ret != KERN_SUCCESS) {
1128 kern_coredump_log(NULL, "Failed to dump userspace process cores\n");
1129 return ret;
1130 }
1131
1132 return KERN_SUCCESS;
1133 }
1134 #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1135
1136 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)1137 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks, void* refcon,
1138 const char *core_description, boolean_t is64bit, uint32_t mh_magic,
1139 cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
1140 {
1141 #pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
1142 return KERN_NOT_SUPPORTED;
1143 }
1144
1145 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)1146 kern_register_userspace_coredump(task_t task, const char * name)
1147 {
1148 (void)task;
1149 (void)name;
1150 return KERN_NOT_SUPPORTED;
1151 }
1152
1153 kern_return_t
kern_unregister_userspace_coredump(task_t task)1154 kern_unregister_userspace_coredump(task_t task)
1155 {
1156 (void)task;
1157 return KERN_NOT_SUPPORTED;
1158 }
1159 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1160
/*
 * Emit a printf-style message during coredump generation.
 *
 * Must be callable with a NULL context — several callers log before (or
 * without) a processor_core_context; the parameter exists so call sites can
 * pass their context uniformly but is currently unused.
 */
void
kern_coredump_log(void *context, const char *string, ...)
{
#pragma unused(context)
	va_list coredump_log_args;

	va_start(coredump_log_args, string);
	/* NOTE(review): 16 is the radix argument to _doprnt — presumably the
	 * default base for unqualified numeric conversions; confirm against
	 * the _doprnt declaration. */
	_doprnt(string, &coredump_log_args, consdebug_putc, 16);
	va_end(coredump_log_args);

#if defined(__arm64__)
	/* Push the message out to the panic log immediately */
	paniclog_flush();
#endif
}
1178