1 /*
2 * Copyright (c) 2017 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <kdp/kdp_core.h>
30 #include <kdp/processor_core.h>
31 #include <kdp/core_notes.h>
32 #include <kern/assert.h>
33 #if MONOTONIC
34 #include <kern/monotonic.h>
35 #endif // MONOTONIC
36 #include <kern/zalloc.h>
37 #include <libkern/kernel_mach_header.h>
38 #include <libkern/OSAtomic.h>
39 #include <libsa/types.h>
40 #include <pexpert/pexpert.h>
41 #include <vm/vm_map.h>
42
43 #ifdef CONFIG_KDP_INTERACTIVE_DEBUGGING
44
/*
 * Round x up to the next multiple of y (x is returned unchanged when it is
 * already a multiple). NOTE: function-like macro — both arguments are
 * evaluated more than once, so do not pass expressions with side effects.
 */
#define roundup(x, y) ((((x) % (y)) == 0) ? \
	(x) : ((x) + ((y) - ((x) % (y)))))
47
/* LC_NOTE data-owner string LLDB matches to find the legacy kernel version-string note. */
#define DATA_OWNER_LEGACY_BIN_SPEC "kern ver str"
/*
 * Format of the legacy bin spec (LC_IDENT-like) LC_NOTE payload as expected by LLDB.
 * Packed so the on-disk layout is exactly version word + fixed-size string.
 */
typedef struct {
	uint32_t version; // currently 1 (LEGACY_BIN_SPEC_VERSION)
	char version_string[KERN_COREDUMP_VERSIONSTRINGMAXSIZE];
} __attribute__((packed)) legacy_bin_spec;
#define LEGACY_BIN_SPEC_VERSION 1
57
/*
 * The kinds of cores this subsystem can produce. NUM_COREDUMP_TYPES is the
 * array bound for tables indexed by type (see bin_spec_map below).
 */
__enum_closed_decl(kern_coredump_type_t, uint8_t, {
	XNU_COREDUMP,           /* the xnu kernel itself */
	USERSPACE_COREDUMP,     /* a registered userspace task */
	COPROCESSOR_COREDUMP,   /* a standalone co-processor */
	NUM_COREDUMP_TYPES,
});

/* Maps each coredump type to the "main bin spec" LC_NOTE type value reported for it. */
static uint32_t bin_spec_map[NUM_COREDUMP_TYPES] = {
	[XNU_COREDUMP] = MAIN_BIN_SPEC_TYPE_KERNEL,
	[USERSPACE_COREDUMP] = MAIN_BIN_SPEC_TYPE_USER,
	[COPROCESSOR_COREDUMP] = MAIN_BIN_SPEC_TYPE_STANDALONE,
};
70
71 /*
72 * The processor_core_context structure describes the current
73 * corefile that's being generated. It also includes a pointer
74 * to the core_outvars which is used by the KDP code for context
75 * about the specific output mechanism being used.
76 *
77 * We include *remaining variables to catch inconsistencies / bugs
78 * in the co-processor coredump callbacks.
79 */
typedef struct {
	struct kdp_core_out_vars * core_outvars; /* Output procedure info (see kdp_out_stage.h) */
	kern_coredump_callback_config *core_config; /* Callbacks of the core currently being dumped */
	void *core_refcon; /* Reference constant associated with the coredump helper */
	boolean_t core_should_be_skipped; /* Indicates whether this specific core should not be dumped */
	boolean_t core_is64bit; /* Bitness of CPU; selects mach_header vs mach_header_64 layout */
	kern_coredump_type_t core_type; /* Indicates the type of this core */
	uint32_t core_mh_magic; /* Magic for mach header */
	cpu_type_t core_cpu_type; /* CPU type for mach header */
	cpu_subtype_t core_cpu_subtype; /* CPU subtype for mach header */
	uint64_t core_file_length; /* Overall corefile length including any zero padding */
	uint64_t core_file_length_compressed; /* File length after compression */
	uint64_t core_segment_count; /* Number of LC_SEGMENTs in the core currently being dumped */
	uint64_t core_segments_remaining; /* Number of LC_SEGMENTs that have not been added to the header */
	uint64_t core_segment_byte_total; /* Sum of all the data from the LC_SEGMENTs in the core */
	uint64_t core_segment_bytes_remaining; /* Quantity of data remaining from LC_SEGMENTs that have yet to be added */
	uint64_t core_thread_count; /* Number of LC_THREADs to be included */
	uint64_t core_threads_remaining; /* Number of LC_THREADs that have yet to be included */
	uint64_t core_thread_state_size; /* Size of each LC_THREAD (all threads use one fixed size) */
	uint64_t core_note_count; /* Number of LC_NOTEs to be included */
	uint64_t core_notes_remaining; /* Number of LC_NOTEs that have not been added to the header */
	uint64_t core_note_bytes_total; /* Sum of all data from the LC_NOTE segments in the core */
	uint64_t core_note_bytes_remaining; /* Quantity of data remaining from LC_NOTEs that have yet to be added */
	uint64_t core_cur_hoffset; /* Current offset in this core's header (load command area) */
	uint64_t core_cur_foffset; /* Current offset in this core's overall file (segment/note data area) */
	uint64_t core_header_size; /* Size of this core's header */
	uint64_t core_total_bytes; /* Total amount of data to be included in this core (excluding zero fill) */
} processor_core_context;
108
109 /*
110 * The kern_coredump_core structure describes a core that has been
111 * registered for use by the coredump mechanism.
112 */
struct kern_coredump_core {
	struct kern_coredump_core *kcc_next; /* Link in the helper list this core is registered on */
	void *kcc_refcon; /* Reference constant to be passed to callbacks */
	char kcc_corename[MACH_CORE_FILEHEADER_NAMELEN]; /* Description of this processor (coprocessors get a "-cp" suffix) */
	boolean_t kcc_is64bit; /* Processor bitness */
	uint32_t kcc_mh_magic; /* Magic for mach header */
	cpu_type_t kcc_cpu_type; /* CPU type for mach header */
	cpu_subtype_t kcc_cpu_subtype; /* CPU subtype for mach header */
	kern_coredump_callback_config kcc_cb; /* Registered processor callbacks for coredump (copied at registration) */
};
123
/* Head of the coprocessor helper list; pushed to lock-free via compare-and-swap. */
struct kern_coredump_core * kern_coredump_core_list = NULL;
/* Head of the userspace-task helper list, protected by the mutex declared below. */
struct kern_coredump_core * kern_userspace_coredump_core_list = NULL;
LCK_GRP_DECLARE(kern_userspace_coredump_core_list_lock_grp, "userspace coredump list");
LCK_MTX_DECLARE(kern_userspace_coredump_core_list_lock, &kern_userspace_coredump_core_list_lock_grp);

/* Signature of the deprecated legacy "sw vers" save callback (kcc_coredump_save_sw_vers). */
typedef kern_return_t (*legacy_sw_vers_registered_cb)(void *refcon, core_save_sw_vers_cb callback, void *context);

/* Total number of registered helpers across all lists; updated with OSAddAtomic. */
uint32_t coredump_registered_count = 0;

/* Helper responsible for dumping the xnu kernel itself (at most one may register). */
struct kern_coredump_core *kernel_helper = NULL;
134
135 static struct kern_coredump_core *
kern_register_coredump_helper_internal(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,kern_coredump_type_t type,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)136 kern_register_coredump_helper_internal(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
137 void *refcon, const char *core_description, kern_coredump_type_t type, boolean_t is64bit,
138 uint32_t mh_magic, cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
139 {
140 struct kern_coredump_core *core_helper = NULL;
141 kern_coredump_callback_config *core_callbacks = NULL;
142
143 if (kern_coredump_config_vers < KERN_COREDUMP_MIN_CONFIG_VERSION) {
144 return NULL;
145 }
146 if (kc_callbacks == NULL) {
147 return NULL;
148 }
149 ;
150 if (core_description == NULL) {
151 return NULL;
152 }
153
154 if (kc_callbacks->kcc_coredump_get_summary == NULL ||
155 kc_callbacks->kcc_coredump_save_segment_descriptions == NULL ||
156 kc_callbacks->kcc_coredump_save_segment_data == NULL ||
157 kc_callbacks->kcc_coredump_save_thread_state == NULL) {
158 return NULL;
159 }
160
161 #pragma clang diagnostic push
162 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
163 legacy_sw_vers_registered_cb legacy_vers_callback = kc_callbacks->kcc_coredump_save_sw_vers;
164 #pragma clang diagnostic pop
165
166 if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
167 if (legacy_vers_callback == NULL &&
168 kc_callbacks->kcc_coredump_save_sw_vers_detail == NULL) {
169 return NULL;
170 }
171 } else {
172 if (legacy_vers_callback == NULL) {
173 return NULL;
174 }
175 }
176
177
178 if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
179 /* Either all note related callbacks should be set or none should be set */
180 if ((kc_callbacks->kcc_coredump_save_note_summary == NULL) != (kc_callbacks->kcc_coredump_save_note_descriptions == NULL)) {
181 return NULL;
182 }
183 if ((kc_callbacks->kcc_coredump_save_note_descriptions == NULL) != (kc_callbacks->kcc_coredump_save_note_data == NULL)) {
184 return NULL;
185 }
186 }
187
188
189 #if !defined(__LP64__)
190 /* We don't support generating 64-bit cores on 32-bit platforms */
191 if (is64bit) {
192 return NULL;
193 }
194 #endif
195
196 core_helper = zalloc_permanent_type(struct kern_coredump_core);
197 core_helper->kcc_next = NULL;
198 core_helper->kcc_refcon = refcon;
199 if (type == XNU_COREDUMP || type == USERSPACE_COREDUMP) {
200 snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%s", core_description);
201 } else {
202 assert(type == COPROCESSOR_COREDUMP);
203 /* Make sure there's room for the -cp suffix (16 - NULL char - strlen(-cp)) */
204 snprintf((char *)&core_helper->kcc_corename, MACH_CORE_FILEHEADER_NAMELEN, "%.12s-cp", core_description);
205 }
206 core_helper->kcc_is64bit = is64bit;
207 core_helper->kcc_mh_magic = mh_magic;
208 core_helper->kcc_cpu_type = cpu_type;
209 core_helper->kcc_cpu_subtype = cpu_subtype;
210 core_callbacks = &core_helper->kcc_cb;
211
212 core_callbacks->kcc_coredump_init = kc_callbacks->kcc_coredump_init;
213 core_callbacks->kcc_coredump_get_summary = kc_callbacks->kcc_coredump_get_summary;
214 core_callbacks->kcc_coredump_save_segment_descriptions = kc_callbacks->kcc_coredump_save_segment_descriptions;
215 core_callbacks->kcc_coredump_save_segment_data = kc_callbacks->kcc_coredump_save_segment_data;
216 core_callbacks->kcc_coredump_save_thread_state = kc_callbacks->kcc_coredump_save_thread_state;
217 #pragma clang diagnostic push
218 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
219 core_callbacks->kcc_coredump_save_sw_vers = kc_callbacks->kcc_coredump_save_sw_vers;
220 #pragma clang diagnostic pop
221
222
223 if (kern_coredump_config_vers >= KERN_COREDUMP_MIN_CONFIG_NOTES) {
224 core_callbacks->kcc_coredump_save_note_summary = kc_callbacks->kcc_coredump_save_note_summary;
225 core_callbacks->kcc_coredump_save_note_descriptions = kc_callbacks->kcc_coredump_save_note_descriptions;
226 core_callbacks->kcc_coredump_save_note_data = kc_callbacks->kcc_coredump_save_note_data;
227 core_callbacks->kcc_coredump_save_sw_vers_detail = kc_callbacks->kcc_coredump_save_sw_vers_detail;
228 }
229
230 if (type == XNU_COREDUMP) {
231 assert(kernel_helper == NULL);
232 kernel_helper = core_helper;
233 } else if (type == USERSPACE_COREDUMP) {
234 lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
235 core_helper->kcc_next = kern_userspace_coredump_core_list;
236 kern_userspace_coredump_core_list = core_helper;
237 lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
238 } else {
239 assert(type == COPROCESSOR_COREDUMP);
240 do {
241 core_helper->kcc_next = kern_coredump_core_list;
242 } while (!OSCompareAndSwapPtr(kern_coredump_core_list, core_helper, &kern_coredump_core_list));
243 }
244
245 OSAddAtomic(1, &coredump_registered_count);
246 kprintf("Registered coredump handler for %s\n", core_description);
247
248 return core_helper;
249 }
250
251 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)252 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks,
253 void *refcon, const char *core_description, boolean_t is64bit, uint32_t mh_magic,
254 cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
255 {
256 if (coredump_registered_count >= KERN_COREDUMP_MAX_CORES) {
257 return KERN_RESOURCE_SHORTAGE;
258 }
259
260 if (kern_register_coredump_helper_internal(kern_coredump_config_vers, kc_callbacks, refcon, core_description, COPROCESSOR_COREDUMP,
261 is64bit, mh_magic, cpu_type, cpu_subtype) == NULL) {
262 return KERN_INVALID_ARGUMENT;
263 }
264
265 return KERN_SUCCESS;
266 }
267
268 kern_return_t
kern_register_xnu_coredump_helper(kern_coredump_callback_config * kc_callbacks)269 kern_register_xnu_coredump_helper(kern_coredump_callback_config *kc_callbacks)
270 {
271 #if defined(__LP64__)
272 boolean_t is64bit = TRUE;
273 #else
274 boolean_t is64bit = FALSE;
275 #endif
276
277 if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, kc_callbacks, NULL, "kernel", XNU_COREDUMP, is64bit,
278 _mh_execute_header.magic, _mh_execute_header.cputype, _mh_execute_header.cpusubtype) == NULL) {
279 return KERN_FAILURE;
280 }
281
282 return KERN_SUCCESS;
283 }
284
/* CPU type recorded for a BSD process; defined outside this file. */
extern cpu_type_t
process_cpu_type(void * bsd_info);

/*
 * NOTE(review): declared here as returning cpu_type_t rather than
 * cpu_subtype_t — confirm this matches the definition site.
 */
extern cpu_type_t
process_cpu_subtype(void * bsd_info);

/* Returns the address of a proc's name buffer; defined in the BSD layer. */
extern char *proc_name_address(void *p);
292
293 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)294 kern_register_userspace_coredump(task_t task, const char * name)
295 {
296 kern_return_t result;
297 struct kern_userspace_coredump_context * context = NULL;
298 boolean_t is64bit;
299 uint32_t mh_magic;
300 uint32_t mh_cputype;
301 uint32_t mh_cpusubtype;
302 kern_coredump_callback_config userkc_callbacks;
303
304 is64bit = task_has_64Bit_addr(task);
305 mh_magic = is64bit ? MH_MAGIC_64 : MH_MAGIC;
306 mh_cputype = process_cpu_type(get_bsdtask_info(task));
307 mh_cpusubtype = process_cpu_subtype(get_bsdtask_info(task));
308
309
310 context = kalloc_type(struct kern_userspace_coredump_context, (zalloc_flags_t)(Z_WAITOK | Z_ZERO));
311 context->task = task;
312
313 userkc_callbacks.kcc_coredump_init = user_dump_init;
314 userkc_callbacks.kcc_coredump_get_summary = user_dump_save_summary;
315 userkc_callbacks.kcc_coredump_save_segment_descriptions = user_dump_save_seg_descriptions;
316 userkc_callbacks.kcc_coredump_save_thread_state = user_dump_save_thread_state;
317 userkc_callbacks.kcc_coredump_save_sw_vers_detail = user_dump_save_sw_vers_detail;
318 userkc_callbacks.kcc_coredump_save_segment_data = user_dump_save_segment_data;
319 userkc_callbacks.kcc_coredump_save_note_summary = user_dump_save_note_summary;
320 userkc_callbacks.kcc_coredump_save_note_descriptions = user_dump_save_note_descriptions;
321 userkc_callbacks.kcc_coredump_save_note_data = user_dump_save_note_data;
322
323 if (kern_register_coredump_helper_internal(KERN_COREDUMP_CONFIG_VERSION, &userkc_callbacks, context, name, USERSPACE_COREDUMP, is64bit,
324 mh_magic, mh_cputype, mh_cpusubtype) == NULL) {
325 result = KERN_FAILURE;
326 goto finish;
327 }
328
329 result = KERN_SUCCESS;
330
331 finish:
332 if (result != KERN_SUCCESS && context != NULL) {
333 kfree_type(struct kern_userspace_coredump_context, context);
334 }
335
336 return result;
337 }
338
339 kern_return_t
kern_unregister_userspace_coredump(task_t task)340 kern_unregister_userspace_coredump(task_t task)
341 {
342 struct kern_coredump_core * current_core = NULL;
343 struct kern_coredump_core * previous_core = NULL;
344
345 lck_mtx_lock(&kern_userspace_coredump_core_list_lock);
346 current_core = kern_userspace_coredump_core_list;
347 while (current_core) {
348 struct kern_userspace_coredump_context * context = (struct kern_userspace_coredump_context *)current_core->kcc_refcon;
349 assert(context != NULL);
350 if (context->task == task) {
351 /* remove current_core from the list */
352 if (previous_core == NULL) {
353 kern_userspace_coredump_core_list = current_core->kcc_next;
354 } else {
355 previous_core->kcc_next = current_core->kcc_next;
356 }
357 break;
358 }
359 previous_core = current_core;
360 current_core = current_core->kcc_next;
361 }
362 lck_mtx_unlock(&kern_userspace_coredump_core_list_lock);
363
364 if (current_core) {
365 kfree_type(struct kern_userspace_coredump_context, current_core->kcc_refcon);
366 OSAddAtomic(-1, &coredump_registered_count);
367 return KERN_SUCCESS;
368 }
369
370 return KERN_NOT_FOUND;
371 }
372
373 /*
374 * Save LC_NOTE metadata about the core we are going to write before we write the mach header
375 */
376 static int
coredump_save_note_summary(uint64_t core_note_count,uint64_t core_note_byte_count,void * context)377 coredump_save_note_summary(uint64_t core_note_count, uint64_t core_note_byte_count, void *context)
378 {
379 processor_core_context *core_context = (processor_core_context *)context;
380
381 if (!core_note_count || !core_note_byte_count || !context) {
382 return KERN_INVALID_ARGUMENT;
383 }
384
385 core_context->core_note_count = core_context->core_notes_remaining = core_note_count;
386 core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = core_note_byte_count;
387
388 return KERN_SUCCESS;
389 }
390
391 /*
392 * Save metadata about the core we're about to write, write out the mach header
393 */
/*
 * Record the segment/thread/note totals for this core, compute the header
 * and file layout, reset the output stream, and write the mach header.
 * Returns KERN_SUCCESS even when the output layer elects to skip this core
 * (core_should_be_skipped is set so later stages can bail out).
 */
static int
coredump_save_summary(uint64_t core_segment_count, uint64_t core_byte_count,
    uint64_t thread_count, uint64_t thread_state_size,
    __unused uint64_t misc_bytes_count, void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	uint32_t sizeofcmds = 0, numcmds = 0;
	bool should_skip = false;
	int ret = 0;

	/* All counts must be non-zero and each LC_THREAD must fit the per-thread cap. */
	if (!core_segment_count || !core_byte_count || !thread_count || !thread_state_size
	    || (thread_state_size > KERN_COREDUMP_THREADSIZE_MAX)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Initialize core_context */
	core_context->core_segments_remaining = core_context->core_segment_count = core_segment_count;
	core_context->core_segment_bytes_remaining = core_context->core_segment_byte_total = core_byte_count;
	core_context->core_threads_remaining = core_context->core_thread_count = thread_count;
	core_context->core_thread_state_size = thread_state_size;

	/* Account for the LC_NOTE needed to store version/load information */
	core_context->core_note_count = core_context->core_notes_remaining = (core_context->core_note_count + 1);
	/* Helpers with the detailed sw-vers callback emit the structured bin-spec note; others get the legacy one. */
	size_t vers_note_length = sizeof(main_bin_spec_note_t);
	if (core_context->core_config->kcc_coredump_save_sw_vers_detail == NULL) {
		vers_note_length = sizeof(legacy_bin_spec);
	}
	core_context->core_note_bytes_total = core_context->core_note_bytes_remaining = (core_context->core_note_bytes_total + vers_note_length);

#if defined(__LP64__)
	if (core_context->core_is64bit) {
		/* Load-command area: one LC_SEGMENT_64 per segment, all thread-state blobs, one note_command per LC_NOTE. */
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command_64) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    (core_context->core_note_count * sizeof(struct note_command)));
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header_64);
	} else
#endif /* defined(__LP64__) */
	{
		/* Same accounting with 32-bit command/header sizes. */
		sizeofcmds = (uint32_t)(core_context->core_segment_count * sizeof(struct segment_command) +
		    (core_context->core_threads_remaining * core_context->core_thread_state_size) +
		    (core_context->core_note_count * sizeof(struct note_command)));
		core_context->core_header_size = sizeofcmds + sizeof(struct mach_header);
	}

	/* Data begins on the first page boundary after the header; the file length includes that zero-fill gap. */
	core_context->core_total_bytes = core_context->core_header_size + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
	core_context->core_file_length = round_page(core_context->core_header_size) + core_context->core_segment_byte_total + core_context->core_note_bytes_total;
	core_context->core_cur_foffset = round_page(core_context->core_header_size);

	numcmds = (uint32_t)(core_context->core_segment_count + core_context->core_thread_count + core_context->core_note_count);

	/*
	 * Reset the zstream and other output context before writing any data out. We do this here
	 * to update the total file length on the outvars before we start writing out.
	 */
	ret = kdp_reset_output_vars(core_context->core_outvars, core_context->core_file_length, true, &should_skip);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s() : failed to reset the out vars : kdp_reset_output_vars(%p, %llu, true, %p) returned error 0x%x\n",
		    __func__, core_context->core_outvars, core_context->core_file_length, &should_skip, ret);
		return ret;
	}

	/* The output layer may decline this core entirely; remember that and succeed. */
	if (should_skip) {
		core_context->core_should_be_skipped = TRUE;
		return KERN_SUCCESS;
	}

	/* Construct core file header */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct mach_header_64 core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	} else
#endif /* defined(__LP64__) */
	{
		struct mach_header core_header = { };

		core_header.magic = core_context->core_mh_magic;
		core_header.cputype = core_context->core_cpu_type;
		core_header.cpusubtype = core_context->core_cpu_subtype;
		core_header.filetype = MH_CORE;
		core_header.ncmds = numcmds;
		core_header.sizeofcmds = sizeofcmds;
		core_header.flags = 0;

		/* Send the core_header to the output procedure */
		ret = kdp_core_output(core_context->core_outvars, sizeof(core_header), (caddr_t)&core_header);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s() : failed to write mach header : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    __func__, core_context->core_outvars, sizeof(core_header), &core_header, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(core_header);
	}

	return KERN_SUCCESS;
}
509
510 /*
511 * Construct a segment command for the specified segment.
512 */
/*
 * Emit one LC_SEGMENT/LC_SEGMENT_64 load command for [seg_start, seg_end).
 * Advances the header offset by the command size and the file offset by the
 * segment's data size; fails if the declared segment budget is exhausted or
 * the command would overrun the header area.
 */
static int
coredump_save_segment_descriptions(uint64_t seg_start, uint64_t seg_end,
    void *context)
{
	processor_core_context *core_context = (processor_core_context *)context;
	int ret;
	uint64_t size = seg_end - seg_start;

	if (seg_end <= seg_start) {
		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses : start 0x%llx >= end 0x%llx\n",
		    __func__, seg_start, seg_end, context, seg_start, seg_end);
		return KERN_INVALID_ARGUMENT;
	}

	/* The helper promised a fixed segment count in the summary; hold it to that. */
	if (core_context->core_segments_remaining == 0) {
		kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : coredump_save_segment_descriptions() called too many times, %llu segment descriptions already recorded\n",
		    __func__, seg_start, seg_end, context, core_context->core_segment_count);
		return KERN_INVALID_ARGUMENT;
	}

	/* Construct segment command */
#if defined(__LP64__)
	if (core_context->core_is64bit) {
		struct segment_command_64 seg_command = { };

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT_64;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0; /* unnamed segment */
		seg_command.vmaddr = seg_start;
		seg_command.vmsize = size;
		seg_command.fileoff = core_context->core_cur_foffset;
		seg_command.filesize = size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu. kdp_core_output(%p, %lu, %p) returned error %d\n",
			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	} else
#endif /* defined(__LP64__) */
	{
		struct segment_command seg_command = { };

		/* 32-bit segment commands cannot describe addresses above 4GB. */
		if (seg_start > UINT32_MAX || seg_end > UINT32_MAX) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : called with invalid addresses for 32-bit : start 0x%llx, end 0x%llx\n",
			    __func__, seg_start, seg_end, context, seg_start, seg_end);
			return KERN_INVALID_ARGUMENT;
		}

		if (core_context->core_cur_hoffset + sizeof(seg_command) > core_context->core_header_size) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : ran out of space to save commands with %llu of %llu remaining\n",
			    __func__, seg_start, seg_end, context, core_context->core_segments_remaining, core_context->core_segment_count);
			return KERN_NO_SPACE;
		}

		seg_command.cmd = LC_SEGMENT;
		seg_command.cmdsize = sizeof(seg_command);
		seg_command.segname[0] = 0; /* unnamed segment */
		seg_command.vmaddr = (uint32_t) seg_start;
		seg_command.vmsize = (uint32_t) size;
		seg_command.fileoff = (uint32_t) core_context->core_cur_foffset;
		seg_command.filesize = (uint32_t) size;
		seg_command.maxprot = VM_PROT_READ;
		seg_command.initprot = VM_PROT_READ;

		/* Flush new command to output */
		ret = kdp_core_output(core_context->core_outvars, sizeof(seg_command), (caddr_t)&seg_command);
		if (ret != KERN_SUCCESS) {
			kern_coredump_log(context, "%s(0x%llx, 0x%llx, %p) : failed to write segment %llu of %llu : kdp_core_output(%p, %lu, %p) returned error 0x%x\n",
			    __func__, seg_start, seg_end, context, core_context->core_segment_count - core_context->core_segments_remaining,
			    core_context->core_segment_count, core_context->core_outvars, sizeof(seg_command), &seg_command, ret);
			return ret;
		}

		core_context->core_cur_hoffset += sizeof(seg_command);
	}

	/* Update coredump context */
	core_context->core_segments_remaining--;
	core_context->core_cur_foffset += size;

	return KERN_SUCCESS;
}
609
610 /*
611 * Construct a LC_NOTE command for the specified note
612 */
613 static int
coredump_save_note_description(const char * data_owner,uint64_t length,void * context)614 coredump_save_note_description(const char * data_owner, uint64_t length, void *context)
615 {
616 processor_core_context *core_context = (processor_core_context *)context;
617 int ret;
618
619 if (data_owner == NULL || (strlen(data_owner) == 0)) {
620 kern_coredump_log(context, "%s() called with invalid data_owner\n", __func__);
621 return KERN_INVALID_ARGUMENT;
622 }
623
624 if (core_context->core_notes_remaining == 0) {
625 kern_coredump_log(context, "%s() called too many times, %llu note descriptions already recorded\n",
626 __func__, core_context->core_note_count);
627 return KERN_INVALID_ARGUMENT;
628 }
629
630 struct note_command note = { .cmd = LC_NOTE,
631 .cmdsize = sizeof(struct note_command),
632 .offset = core_context->core_cur_foffset,
633 .size = length, };
634 strlcpy((char *) ¬e.data_owner, data_owner, sizeof(note.data_owner));
635
636 /* Flush new command to output */
637 ret = kdp_core_output(core_context->core_outvars, sizeof(note), (caddr_t)¬e);
638 if (ret != KERN_SUCCESS) {
639 kern_coredump_log(context, "%s() : failed to write note %llu of %llu : kdp_core_output() returned error 0x%x\n",
640 __func__, core_context->core_note_count - core_context->core_notes_remaining,
641 core_context->core_note_count, ret);
642 return ret;
643 }
644
645 /* Update coredump context */
646 core_context->core_cur_foffset += length;
647 core_context->core_cur_hoffset += sizeof(note);
648 core_context->core_notes_remaining--;
649
650 return KERN_SUCCESS;
651 }
652
653 /*
654 * Save thread state.
655 *
656 * Passed thread_state is expected to be a struct thread_command
657 */
658 static int
coredump_save_thread_state(void * thread_state,void * context)659 coredump_save_thread_state(void *thread_state, void *context)
660 {
661 processor_core_context *core_context = (processor_core_context *)context;
662 struct thread_command *tc = (struct thread_command *)thread_state;
663 int ret;
664
665 if (tc->cmd != LC_THREAD) {
666 kern_coredump_log(context, "%s() : found %d expected LC_THREAD (%d)\n", __func__, tc->cmd, LC_THREAD);
667 return KERN_INVALID_ARGUMENT;
668 }
669
670 if (core_context->core_cur_hoffset + core_context->core_thread_state_size > core_context->core_header_size) {
671 kern_coredump_log(context, "%s() : ran out of space to save threads with %llu of %llu remaining\n", __func__,
672 core_context->core_threads_remaining, core_context->core_thread_count);
673 return KERN_NO_SPACE;
674 }
675
676 ret = kdp_core_output(core_context->core_outvars, core_context->core_thread_state_size, (caddr_t)thread_state);
677 if (ret != KERN_SUCCESS) {
678 kern_coredump_log(context, "%s() : failed to write thread data : kdp_core_output() returned 0x%x\n", __func__, ret);
679 return ret;
680 }
681
682 core_context->core_threads_remaining--;
683 core_context->core_cur_hoffset += core_context->core_thread_state_size;
684
685 return KERN_SUCCESS;
686 }
687
688 static int
coredump_save_segment_data(void * seg_data,uint64_t length,void * context)689 coredump_save_segment_data(void *seg_data, uint64_t length, void *context)
690 {
691 int ret;
692 processor_core_context *core_context = (processor_core_context *)context;
693
694 if (length > core_context->core_segment_bytes_remaining) {
695 kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
696 seg_data, length, context, core_context->core_segment_byte_total - core_context->core_segment_bytes_remaining,
697 core_context->core_segment_bytes_remaining);
698 return KERN_INVALID_ARGUMENT;
699 }
700
701 ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)seg_data);
702 if (ret != KERN_SUCCESS) {
703 kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
704 core_context->core_segment_bytes_remaining, ret);
705 return ret;
706 }
707
708 core_context->core_segment_bytes_remaining -= length;
709 core_context->core_cur_foffset += length;
710
711 return KERN_SUCCESS;
712 }
713
714 static int
coredump_save_note_data(void * note_data,uint64_t length,void * context)715 coredump_save_note_data(void *note_data, uint64_t length, void *context)
716 {
717 int ret;
718 processor_core_context *core_context = (processor_core_context *)context;
719
720 if (length > core_context->core_note_bytes_remaining) {
721 kern_coredump_log(context, "%s(%p, %llu, %p) : called with too much data, %llu written, %llu left\n", __func__,
722 note_data, length, context, core_context->core_note_bytes_total - core_context->core_note_bytes_remaining,
723 core_context->core_note_bytes_remaining);
724 return KERN_INVALID_ARGUMENT;
725 }
726
727 ret = kdp_core_output(core_context->core_outvars, length, (caddr_t)note_data);
728 if (ret != KERN_SUCCESS) {
729 kern_coredump_log(context, "%s() : failed to write data (%llu bytes remaining) :%d\n", __func__,
730 core_context->core_note_bytes_remaining, ret);
731 return ret;
732 }
733
734 core_context->core_note_bytes_remaining -= length;
735 core_context->core_cur_foffset += length;
736
737 return KERN_SUCCESS;
738 }
739
740 static int
coredump_save_sw_vers_legacy(void * sw_vers,uint64_t length,void * context)741 coredump_save_sw_vers_legacy(void *sw_vers, uint64_t length, void *context)
742 {
743 processor_core_context *core_context = (processor_core_context *)context;
744 int ret;
745
746 if (length > KERN_COREDUMP_VERSIONSTRINGMAXSIZE || !length) {
747 kern_coredump_log(context, "%s(%p, %llu, %p) : called with invalid length %llu\n", __func__,
748 sw_vers, length, context, length);
749 return KERN_INVALID_ARGUMENT;
750 }
751
752 uint32_t version = LEGACY_BIN_SPEC_VERSION;
753 ret = coredump_save_note_data(&version, sizeof(version), context);
754 if (ret != KERN_SUCCESS) {
755 kern_coredump_log(context, "%s() : failed to write legacy bin spec version : coredump_save_note_data() returned 0x%x\n",
756 __func__, ret);
757 return ret;
758 }
759
760 ret = coredump_save_note_data(sw_vers, length, context);
761 if (ret != KERN_SUCCESS) {
762 kern_coredump_log(context, "%s() : failed to write sw_vers string : coredump_save_note_data() returned 0x%x\n",
763 __func__, ret);
764 return ret;
765 }
766
767 if (length < KERN_COREDUMP_VERSIONSTRINGMAXSIZE) {
768 /* Zero fill to the full size */
769 uint64_t length_to_zero = (KERN_COREDUMP_VERSIONSTRINGMAXSIZE - length);
770 ret = kdp_core_output(core_context->core_outvars, length_to_zero, NULL);
771 if (ret != KERN_SUCCESS) {
772 kern_coredump_log(context, "%s() : failed to write zero fill padding : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
773 __func__, core_context->core_outvars, length_to_zero, ret);
774 return ret;
775 }
776
777 core_context->core_note_bytes_remaining -= length_to_zero;
778 core_context->core_cur_foffset += length_to_zero;
779 }
780
781 return KERN_SUCCESS;
782 }
783
784 static int
coredump_save_sw_vers(uint64_t address,uuid_t uuid,uint32_t log2_pagesize,void * context)785 coredump_save_sw_vers(uint64_t address, uuid_t uuid, uint32_t log2_pagesize, void *context)
786 {
787 processor_core_context *core_context = (processor_core_context *)context;
788 int ret;
789
790 uint32_t type = bin_spec_map[core_context->core_type];
791 main_bin_spec_note_t spec = {
792 .version = MAIN_BIN_SPEC_VERSION,
793 .type = type,
794 .address = address,
795 .log2_pagesize = log2_pagesize,
796 };
797 uuid_copy(*((uuid_t *)&spec.uuid), uuid);
798
799 ret = coredump_save_note_data(&spec, sizeof(spec), context);
800 if (ret != KERN_SUCCESS) {
801 kern_coredump_log(context, "%s() : failed to write main bin spec structure : coredump_save_note_data() returned 0x%x\n", __func__, ret);
802 return ret;
803 }
804
805 return KERN_SUCCESS;
806 }
807
808 static kern_return_t
kern_coredump_routine(void * core_outvars,struct kern_coredump_core * current_core,uint64_t core_begin_offset,uint64_t * core_file_length,boolean_t * header_update_failed,kern_coredump_type_t type,uint64_t details_flags)809 kern_coredump_routine(void *core_outvars, struct kern_coredump_core *current_core, uint64_t core_begin_offset, uint64_t *core_file_length, boolean_t *header_update_failed, kern_coredump_type_t type, uint64_t details_flags)
810 {
811 #if MONOTONIC
812 uint64_t start_cycles;
813 uint64_t end_cycles;
814 #endif // MONOTONIC
815 kern_return_t ret;
816 processor_core_context context = { };
817 *core_file_length = 0;
818 *header_update_failed = FALSE;
819
820 #if MONOTONIC
821 start_cycles = mt_cur_cpu_cycles();
822 #endif // MONOTONIC
823
824 /* Setup the coredump context */
825 context.core_outvars = core_outvars;
826 context.core_config = ¤t_core->kcc_cb;
827 context.core_refcon = current_core->kcc_refcon;
828 context.core_is64bit = current_core->kcc_is64bit;
829 context.core_mh_magic = current_core->kcc_mh_magic;
830 context.core_cpu_type = current_core->kcc_cpu_type;
831 context.core_cpu_subtype = current_core->kcc_cpu_subtype;
832 context.core_type = type;
833
834 kern_coredump_log(&context, "\nBeginning coredump of %s\n", current_core->kcc_corename);
835
836 if (current_core->kcc_cb.kcc_coredump_init != NULL) {
837 ret = current_core->kcc_cb.kcc_coredump_init(context.core_refcon, &context);
838 if (ret == KERN_NODE_DOWN) {
839 kern_coredump_log(&context, "coredump_init returned KERN_NODE_DOWN, skipping this core\n");
840 return KERN_SUCCESS;
841 } else if (ret != KERN_SUCCESS) {
842 kern_coredump_log(&context, "(%s) : coredump_init failed with %d\n", __func__, ret);
843 return ret;
844 }
845 }
846
847 /* Retrieve information about LC_NOTE data we will write out as part of the core before we populate the general header */
848 if (current_core->kcc_cb.kcc_coredump_save_note_summary != NULL) {
849 ret = current_core->kcc_cb.kcc_coredump_save_note_summary(context.core_refcon, coredump_save_note_summary, &context);
850 if (ret != KERN_SUCCESS) {
851 kern_coredump_log(&context, "(%s) : save_note_note_summary failed with %d\n", __func__, ret);
852 return ret;
853 }
854 }
855
856 /* Populate the context with metadata about the corefile (cmd info, sizes etc) */
857 ret = current_core->kcc_cb.kcc_coredump_get_summary(context.core_refcon, coredump_save_summary, &context);
858 if (ret != KERN_SUCCESS) {
859 kern_coredump_log(&context, "(%s) : get_summary failed with %d\n", __func__, ret);
860 return ret;
861 }
862
863 if (context.core_should_be_skipped) {
864 kern_coredump_log(&context, "Skipping coredump\n");
865 return KERN_SUCCESS;
866 }
867
868 if (context.core_header_size == 0) {
869 kern_coredump_log(&context, "(%s) : header size not populated after coredump_get_summary\n", __func__);
870 return KERN_FAILURE;
871 }
872
873 /* Save the segment descriptions for the segments to be included */
874 ret = current_core->kcc_cb.kcc_coredump_save_segment_descriptions(context.core_refcon, coredump_save_segment_descriptions,
875 &context);
876 if (ret != KERN_SUCCESS) {
877 kern_coredump_log(&context, "(%s) : save_segment_descriptions failed with %d\n", __func__, ret);
878 return ret;
879 }
880
881 if (context.core_segments_remaining != 0) {
882 kern_coredump_log(&context, "(%s) : save_segment_descriptions returned without all segment descriptions written, %llu of %llu remaining\n",
883 __func__, context.core_segments_remaining, context.core_segment_count);
884 return KERN_FAILURE;
885 }
886
887 /* write out the LC_NOTE with the binary info */
888 if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
889 ret = coredump_save_note_description(MAIN_BIN_SPEC_DATA_OWNER, sizeof(main_bin_spec_note_t), &context);
890 } else {
891 ret = coredump_save_note_description(DATA_OWNER_LEGACY_BIN_SPEC, sizeof(legacy_bin_spec), &context);
892 }
893 if (ret != KERN_SUCCESS) {
894 kern_coredump_log(&context, "(%s) : coredump_save_note_description returned %d while writing binary info LC_NOTE description", __func__, ret);
895 return ret;
896 }
897
898 /* Save LC_NOTE desciptions for any additional notes to be included */
899 if (current_core->kcc_cb.kcc_coredump_save_note_descriptions != NULL) {
900 ret = current_core->kcc_cb.kcc_coredump_save_note_descriptions(context.core_refcon, coredump_save_note_description, &context);
901 if (ret != KERN_SUCCESS) {
902 kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_descriptions failed with %d\n", __func__, ret);
903 return ret;
904 }
905 }
906
907 if (context.core_notes_remaining != 0) {
908 kern_coredump_log(&context, "(%s) : save_note_descriptions returned without all note descriptions written, %llu of %llu remaining\n",
909 __func__, context.core_notes_remaining, context.core_note_count);
910 return KERN_FAILURE;
911 }
912
913 /*
914 * Save the thread commands/state
915 *
916 * TODO: Should this buffer be allocated at boot rather than on the stack?
917 */
918 if (context.core_thread_state_size) {
919 char threadstatebuf[context.core_thread_state_size];
920 ret = current_core->kcc_cb.kcc_coredump_save_thread_state(context.core_refcon, &threadstatebuf, coredump_save_thread_state,
921 &context);
922 if (ret != KERN_SUCCESS) {
923 kern_coredump_log(&context, "(%s) : save_thread_state failed with %d\n", __func__, ret);
924 return ret;
925 }
926 }
927
928 if (context.core_threads_remaining != 0) {
929 kern_coredump_log(&context, "(%s) : save_thread_state returned without all thread descriptions written, %llu of %llu remaining\n",
930 __func__, context.core_threads_remaining, context.core_thread_count);
931 return KERN_FAILURE;
932 }
933 assert(context.core_cur_hoffset == context.core_header_size);
934
935 /* Zero fill between the end of the header and the beginning of the segment data file offset */
936 ret = kdp_core_output(context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), NULL);
937 if (ret != KERN_SUCCESS) {
938 kern_coredump_log(&context, "(kern_coredump_routine) : failed to write zero fill padding (%llu bytes remaining) : kdp_core_output(%p, %llu, NULL) returned 0x%x\n",
939 context.core_segment_bytes_remaining, context.core_outvars, (round_page(context.core_header_size) - context.core_header_size), ret);
940 return ret;
941 }
942
943 /* Reset our local current file offset before we start writing out segment data */
944 context.core_cur_foffset = round_page(context.core_header_size);
945
946 ret = current_core->kcc_cb.kcc_coredump_save_segment_data(context.core_refcon, coredump_save_segment_data, &context);
947 if (ret != KERN_SUCCESS) {
948 kern_coredump_log(&context, "coredump_save_segment_data failed with %d\n", ret);
949 return ret;
950 }
951
952 if (context.core_segment_bytes_remaining != 0) {
953 kern_coredump_log(&context, "(kern_coredump_routine) : save_segment_data returned without all segment data written, %llu of %llu remaining\n",
954 context.core_segment_bytes_remaining, context.core_segment_byte_total);
955 return KERN_FAILURE;
956 }
957
958 /* Save out the LC_NOTE segment data, starting with the binary info / sw vers one */
959 if (current_core->kcc_cb.kcc_coredump_save_sw_vers_detail != NULL) {
960 ret = current_core->kcc_cb.kcc_coredump_save_sw_vers_detail(context.core_refcon, coredump_save_sw_vers, &context);
961 if (ret != KERN_SUCCESS) {
962 kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers_detail_cb failed with 0x%x\n", __func__, ret);
963 return ret;
964 }
965 } else {
966 #pragma clang diagnostic push
967 #pragma clang diagnostic ignored "-Wdeprecated-declarations"
968 ret = current_core->kcc_cb.kcc_coredump_save_sw_vers(context.core_refcon, coredump_save_sw_vers_legacy, &context);
969 #pragma clang diagnostic pop
970 if (ret != KERN_SUCCESS) {
971 kern_coredump_log(&context, "(%s) : kcc_coredump_save_sw_vers failed with 0x%x\n", __func__, ret);
972 return ret;
973 }
974 }
975
976 if (current_core->kcc_cb.kcc_coredump_save_note_data != NULL) {
977 ret = current_core->kcc_cb.kcc_coredump_save_note_data(context.core_refcon, coredump_save_note_data, &context);
978 if (ret != KERN_SUCCESS) {
979 kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data failed with 0x%x\n", __func__, ret);
980 return ret;
981 }
982 }
983
984 if (context.core_note_bytes_remaining != 0) {
985 kern_coredump_log(&context, "(%s) : kcc_coredump_save_note_data returned without all note data written, %llu of %llu remaining\n",
986 __func__, context.core_note_bytes_remaining, context.core_note_bytes_total);
987 return KERN_FAILURE;
988 }
989
990
991 /* Flush the last data out */
992 ret = kdp_core_output(context.core_outvars, 0, NULL);
993 if (ret != KERN_SUCCESS) {
994 kern_coredump_log(&context, "(kern_coredump_routine) : failed to flush final core data : kdp_core_output(%p, 0, NULL) returned 0x%x\n",
995 context.core_outvars, ret);
996 return ret;
997 }
998
999 kern_coredump_log(&context, "Done\nCoredump complete of %s, dumped %llu segments (%llu bytes), %llu threads (%llu bytes) overall uncompressed file length %llu bytes.",
1000 current_core->kcc_corename, context.core_segment_count, context.core_segment_byte_total, context.core_thread_count,
1001 (context.core_thread_count * context.core_thread_state_size), context.core_file_length);
1002
1003 #if MONOTONIC
1004 end_cycles = mt_cur_cpu_cycles();
1005 kern_coredump_log(&context, "\nCore dump took %llu cycles\n", end_cycles - start_cycles);
1006 #endif // MONOTONIC
1007
1008 if (core_begin_offset) {
1009 /* If we're writing to disk (we have a begin offset), we need to update the header */
1010 ret = kern_dump_record_file(context.core_outvars, current_core->kcc_corename, core_begin_offset, &context.core_file_length_compressed, details_flags);
1011 if (ret != KERN_SUCCESS) {
1012 *header_update_failed = TRUE;
1013 kern_coredump_log(&context, "\n(kern_coredump_routine) : kern_dump_record_file failed with %d\n", ret);
1014 return ret;
1015 }
1016 }
1017
1018 kern_coredump_log(&context, " Compressed file length is %llu bytes\n", context.core_file_length_compressed);
1019
1020 *core_file_length = context.core_file_length_compressed;
1021
1022 return KERN_SUCCESS;
1023 }
1024
1025 /*
1026 * Collect coprocessor and userspace coredumps
1027 */
1028 static kern_return_t
kern_do_auxiliary_coredump(void * core_outvars,struct kern_coredump_core * list,uint64_t * last_file_offset,uint64_t details_flags)1029 kern_do_auxiliary_coredump(void * core_outvars, struct kern_coredump_core * list, uint64_t * last_file_offset, uint64_t details_flags)
1030 {
1031 struct kern_coredump_core *current_core = list;
1032 uint64_t prev_core_length = 0;
1033 boolean_t header_update_failed = FALSE;
1034 kern_coredump_type_t type = current_core == kern_userspace_coredump_core_list ? USERSPACE_COREDUMP : COPROCESSOR_COREDUMP;
1035 kern_return_t ret = KERN_SUCCESS;
1036 kern_return_t cur_ret = KERN_SUCCESS;
1037
1038 if (type == USERSPACE_COREDUMP && kdp_lck_mtx_lock_spin_is_acquired(&kern_userspace_coredump_core_list_lock)) {
1039 // Userspace coredump list was being modified at the time of the panic. Skip collecting userspace coredumps
1040 kern_coredump_log(NULL, "Skipping userspace coredump, coredump list is locked\n");
1041 return KERN_FAILURE;
1042 }
1043
1044 while (current_core) {
1045 /* Seek to the beginning of the next file */
1046 cur_ret = kern_dump_seek_to_next_file(core_outvars, *last_file_offset);
1047 if (cur_ret != KERN_SUCCESS) {
1048 kern_coredump_log(NULL, "Failed to seek to beginning of next core\n");
1049 return KERN_FAILURE;
1050 }
1051
1052 cur_ret = kern_coredump_routine(core_outvars, current_core, *last_file_offset, &prev_core_length, &header_update_failed, type, details_flags);
1053 if (cur_ret != KERN_SUCCESS) {
1054 // As long as we didn't fail while updating the header for the raw file, we should be able to try
1055 // to capture other corefiles.
1056 if (header_update_failed) {
1057 // The header may be in an inconsistent state, so bail now
1058 return KERN_FAILURE;
1059 } else {
1060 // Try to capture other corefiles even if one failed, update the overall return
1061 // status though
1062 prev_core_length = 0;
1063 ret = KERN_FAILURE;
1064 }
1065 }
1066
1067 /* Calculate the offset of the beginning of the next core in the raw file */
1068 *last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1069 prev_core_length = 0;
1070 current_core = current_core->kcc_next;
1071 }
1072
1073 return ret;
1074 }
1075
1076 kern_return_t
kern_do_coredump(void * core_outvars,boolean_t kernel_only,uint64_t first_file_offset,uint64_t * last_file_offset,uint64_t details_flags)1077 kern_do_coredump(void *core_outvars, boolean_t kernel_only, uint64_t first_file_offset, uint64_t *last_file_offset, uint64_t details_flags)
1078 {
1079 uint64_t prev_core_length = 0;
1080 kern_return_t cur_ret = KERN_SUCCESS, ret = KERN_SUCCESS;
1081 boolean_t header_update_failed = FALSE;
1082
1083 assert(last_file_offset != NULL);
1084
1085 *last_file_offset = first_file_offset;
1086 cur_ret = kern_coredump_routine(core_outvars, kernel_helper, *last_file_offset, &prev_core_length, &header_update_failed, XNU_COREDUMP, details_flags);
1087 if (cur_ret != KERN_SUCCESS) {
1088 // As long as we didn't fail while updating the header for the raw file, we should be able to try
1089 // to capture other corefiles.
1090 if (header_update_failed) {
1091 // The header may be in an inconsistent state, so bail now
1092 return KERN_FAILURE;
1093 } else {
1094 prev_core_length = 0;
1095 ret = KERN_FAILURE;
1096 }
1097 }
1098
1099 *last_file_offset = roundup(((*last_file_offset) + prev_core_length), KERN_COREDUMP_BEGIN_FILEBYTES_ALIGN);
1100
1101 if (kernel_only) {
1102 return ret;
1103 }
1104
1105 // Collect coprocessor coredumps first, in case userspace coredumps fail
1106 ret = kern_do_auxiliary_coredump(core_outvars, kern_coredump_core_list, last_file_offset, details_flags);
1107 if (ret != KERN_SUCCESS) {
1108 kern_coredump_log(NULL, "Failed to dump coprocessor cores\n");
1109 return ret;
1110 }
1111
1112 ret = kern_do_auxiliary_coredump(core_outvars, kern_userspace_coredump_core_list, last_file_offset, details_flags);
1113 if (ret != KERN_SUCCESS) {
1114 kern_coredump_log(NULL, "Failed to dump userspace process cores\n");
1115 return ret;
1116 }
1117
1118 return KERN_SUCCESS;
1119 }
1120 #else /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1121
1122 kern_return_t
kern_register_coredump_helper(int kern_coredump_config_vers,const kern_coredump_callback_config * kc_callbacks,void * refcon,const char * core_description,boolean_t is64bit,uint32_t mh_magic,cpu_type_t cpu_type,cpu_subtype_t cpu_subtype)1123 kern_register_coredump_helper(int kern_coredump_config_vers, const kern_coredump_callback_config *kc_callbacks, void* refcon,
1124 const char *core_description, boolean_t is64bit, uint32_t mh_magic,
1125 cpu_type_t cpu_type, cpu_subtype_t cpu_subtype)
1126 {
1127 #pragma unused(kern_coredump_config_vers, kc_callbacks, refcon, core_description, is64bit, mh_magic, cpu_type, cpu_subtype)
1128 return KERN_NOT_SUPPORTED;
1129 }
1130
1131 kern_return_t
kern_register_userspace_coredump(task_t task,const char * name)1132 kern_register_userspace_coredump(task_t task, const char * name)
1133 {
1134 (void)task;
1135 (void)name;
1136 return KERN_NOT_SUPPORTED;
1137 }
1138
1139 kern_return_t
kern_unregister_userspace_coredump(task_t task)1140 kern_unregister_userspace_coredump(task_t task)
1141 {
1142 (void)task;
1143 return KERN_NOT_SUPPORTED;
1144 }
1145 #endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */
1146
1147 /*
1148 * Must be callable with a NULL context
1149 */
1150 void
kern_coredump_log(void * context,const char * string,...)1151 kern_coredump_log(void *context, const char *string, ...)
1152 {
1153 #pragma unused(context)
1154 va_list coredump_log_args;
1155
1156 va_start(coredump_log_args, string);
1157 _doprnt(string, &coredump_log_args, consdebug_putc, 16);
1158 va_end(coredump_log_args);
1159
1160 #if defined(__arm64__)
1161 paniclog_flush();
1162 #endif
1163 }
1164