/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kdp/processor_core.h>
#include <kdp/kdp_core.h>
#include <kdp/kdp_common.h>
#include <vm/vm_map.h>

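/*
 * Initialization callback for a userspace coredump. Validates that the
 * target task can be dumped safely from the panic context: the task must
 * exist, still be active, and neither the task lock nor an exclusive hold
 * on its VM map may be owned, since we cannot block on locks at panic time.
 * Returns KERN_SUCCESS if the task is dumpable, KERN_NODE_DOWN otherwise so
 * the coredump layer skips it.
 */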
kern_return_t
user_dump_init(void *refcon, void *context)
{
	kern_return_t err = KERN_NODE_DOWN;
	struct kern_userspace_coredump_context * uccontext = refcon;

	if (!uccontext) {
		kern_coredump_log(context, "%s: context allocation failure\n", __func__);
		goto finish;
	}

	if (!uccontext->task) {
		kern_coredump_log(context, "%s: no task is set\n", __func__);
		goto finish;
	}

	// Skip inactive tasks
	if (!uccontext->task->active) {
		kern_coredump_log(context, "%s: skipping inactive task\n", __func__);
		goto finish;
	}

	// If task is locked, it is unsafe to iterate over its threads
	if (kdp_task_is_locked(uccontext->task)) {
		kern_coredump_log(context, "%s: skipping locked task\n", __func__);
		goto finish;
	}

	// If vm map is locked exclusively, it is unsafe to traverse vm mappings
	if (kdp_vm_map_is_acquired_exclusive(uccontext->task->map)) {
		kern_coredump_log(context, "%s: skipping task with locked vm map\n", __func__);
		goto finish;
	}

	err = KERN_SUCCESS;

finish:
	return err;
}

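/*
 * Preflight pass: counts the number of dumpable VM regions and the total
 * number of bytes they cover, so the coredump layer can size the Mach-O
 * header and segment list before any data is written.
 */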
struct user_dump_preflight_context {
	struct kern_userspace_coredump_context * uccontext;
	uint32_t region_count;
	uint64_t dumpable_bytes;
};

static kern_return_t
user_dump_map_traverse_preflight_callback(vm_offset_t start, vm_offset_t end, void *context)
{
	struct user_dump_preflight_context *udc_preflight = context;

	assert(start < end);

	udc_preflight->region_count++;
	udc_preflight->dumpable_bytes += (end - start);

	return KERN_SUCCESS;
}

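/*
 * Summary callback: walks the task's VM map (faulting pages in where
 * necessary) to tally regions and bytes, then reports those totals plus
 * the per-thread register state footprint to the coredump layer.
 */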
kern_return_t
user_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	struct user_dump_preflight_context udc_preflight = {.uccontext = uccontext, .region_count = 0, .dumpable_bytes = 0};
	uint64_t thread_state_size = 0, thread_count = 0;
	kern_return_t ret;

	ret = kdp_traverse_mappings(uccontext->task,
	    KDP_FAULT_FLAGS_ENABLE_FAULTING,
	    KDP_TRAVERSE_MAPPINGS_FLAGS_NONE,
	    user_dump_map_traverse_preflight_callback,
	    &udc_preflight);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s: vm map traversal failed: %d\n", __func__, ret);
		return ret;
	}

	kern_collect_userth_state_size(uccontext->task, &thread_count, &thread_state_size);
	return callback(udc_preflight.region_count, udc_preflight.dumpable_bytes,
	           thread_count, thread_state_size, 0, context);
}

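/*
 * Segment-description pass: forwards each VM region's [start, end) range
 * to the coredump layer so it can emit one Mach-O segment command per
 * region.
 */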
struct user_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};

static kern_return_t
user_dump_map_traverse_send_segdesc_callback(vm_offset_t start, vm_offset_t end, void *context)
{
	struct user_dump_send_seg_desc_context *uds_context = context;

	assert(start < end);

	uint64_t seg_start = (uint64_t) start;
	uint64_t seg_end = (uint64_t) end;

	return uds_context->callback(seg_start, seg_end, uds_context->context);
}

kern_return_t
user_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	struct user_dump_send_seg_desc_context uds_context;
	uds_context.callback = callback;
	uds_context.context = context;
	kern_return_t ret = kdp_traverse_mappings(uccontext->task,
	    KDP_FAULT_FLAGS_ENABLE_FAULTING,
	    KDP_TRAVERSE_MAPPINGS_FLAGS_NONE,
	    user_dump_map_traverse_send_segdesc_callback,
	    &uds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s: vm map traversal failed: %d\n", __func__, ret);
		return ret;
	}
	return KERN_SUCCESS;
}

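/*
 * Thread-state pass: for every thread in the task, collects the userspace
 * register state into the caller-supplied buffer (sized by the coredump
 * layer via kern_collect_userth_state_size) and hands it to the callback,
 * one thread at a time.
 */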
kern_return_t
user_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	uint64_t thread_state_size = 0, thread_count = 0;
	thread_t thread = THREAD_NULL;

	kern_collect_userth_state_size(uccontext->task, &thread_count, &thread_state_size);
	queue_iterate(&uccontext->task->threads, thread, thread_t, task_threads) {
		kern_collect_userth_state(uccontext->task, thread, buf, thread_state_size);

		kern_return_t ret = callback(buf, context);
		if (ret != KERN_SUCCESS) {
			return ret;
		}
	}

	return KERN_SUCCESS;
}

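/*
 * Software-version pass: looks up the task's dyld load address, UUID and
 * page size so they can be recorded in the "main bin spec" LC_NOTE, which
 * debugging tools use to locate dyld inside the core.
 */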
kern_return_t
user_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	uint64_t dyld_load_address;
	uuid_t dyld_uuid;
	size_t task_page_size;

	/*
	 * For user coredumps we want the dyld UUID to be stored in the main bin spec LC_NOTE
	 */
	kern_return_t ret = kdp_task_dyld_info(uccontext->task, KDP_FAULT_FLAGS_ENABLE_FAULTING, &dyld_load_address, dyld_uuid, &task_page_size);
	if (ret != KERN_SUCCESS) {
		return ret;
	}
	return callback(dyld_load_address, dyld_uuid, bit_log2(task_page_size), context);
}

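/*
 * Segment-data pass: the traversal runs in physical mode here, so the
 * callback receives physical addresses. phystokv() converts the physical
 * start address to a kernel-virtual pointer the coredump layer can copy
 * from.
 */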
struct user_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};

static kern_return_t
user_dump_map_traverse_send_segdata_callback(vm_offset_t physstart, vm_offset_t physend, void * context)
{
	struct user_dump_send_segdata_context * uds_context = context;

	assert(physstart < physend);

	void * vphysstart = (void *) phystokv(physstart);
	return uds_context->callback(vphysstart, (uint64_t)((uintptr_t)physend - (uintptr_t)physstart), uds_context->context);
}

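/*
 * Writes the contents of every VM region to the core file by traversing
 * the task's mappings with KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL set.
 */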
kern_return_t
user_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	struct user_dump_send_segdata_context uds_context = {.callback = callback, .context = context};
	kern_return_t ret = kdp_traverse_mappings(uccontext->task,
	    KDP_FAULT_FLAGS_ENABLE_FAULTING,
	    KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL,
	    user_dump_map_traverse_send_segdata_callback,
	    &uds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s: vm map traversal failed: %d\n", __func__, ret);
		return ret;
	}

	return KERN_SUCCESS;
}


/*
 * Add a LC_NOTE to the core to indicate that it was created during a kernel panic.
 */

#define USER_COREDUMP_DATA_OWNER "panic context"

extern char kernel_uuid_string[];

struct user_coredump_note {
	uuid_string_t kernel_uuid_string;
};

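/*
 * The three callbacks below implement the panic-context note: one LC_NOTE
 * whose data owner is "panic context" and whose payload is the UUID string
 * of the kernel that was running when the core was taken. The summary
 * callback reports the note count and size, the descriptions callback
 * names the data owner, and the data callback writes the payload itself.
 */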
kern_return_t
user_dump_save_note_summary(void *refcon __unused, core_save_note_summary_cb callback, void *context)
{
	return callback(1 /* one LC_NOTE */, sizeof(struct user_coredump_note), context);
}

kern_return_t
user_dump_save_note_descriptions(void *refcon __unused, core_save_note_descriptions_cb callback, void *context)
{
	return callback(USER_COREDUMP_DATA_OWNER, sizeof(struct user_coredump_note), context);
}

kern_return_t
user_dump_save_note_data(void *refcon __unused, core_save_note_data_cb callback, void *context)
{
	struct user_coredump_note note;
	strlcpy(&note.kernel_uuid_string[0], kernel_uuid_string, sizeof(uuid_string_t));

	return callback(&note, sizeof(struct user_coredump_note), context);
}
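
/*
 * A minimal sketch (not part of the original file) of how these callbacks
 * plug into the coredump layer. During a dump they are invoked in order:
 * init, save_summary, save_seg_descriptions, save_thread_state,
 * save_sw_vers_detail, save_segment_data, then the three note callbacks.
 * The kern_coredump_callback_config field names below are assumptions
 * based on kdp/processor_core.h from the same XNU release; in practice,
 * userspace dumps are registered through a dedicated helper in
 * kern_coredump.c rather than by hand.
 */
#if 0 /* illustrative sketch only, not compiled */
static kern_return_t
user_coredump_register_example(struct kern_userspace_coredump_context *uccontext)
{
	kern_coredump_callback_config cb = {
		.kcc_coredump_init = user_dump_init,
		.kcc_coredump_get_summary = user_dump_save_summary,
		.kcc_coredump_save_segment_descriptions = user_dump_save_seg_descriptions,
		.kcc_coredump_save_thread_state = user_dump_save_thread_state,
		.kcc_coredump_save_sw_vers_detail = user_dump_save_sw_vers_detail,
		.kcc_coredump_save_segment_data = user_dump_save_segment_data,
		.kcc_coredump_save_note_summary = user_dump_save_note_summary,
		.kcc_coredump_save_note_descriptions = user_dump_save_note_descriptions,
		.kcc_coredump_save_note_data = user_dump_save_note_data,
	};
	/* uccontext->task identifies the task to dump; the registration
	 * refcon is handed back to every callback above. The trailing
	 * architecture/format arguments are elided here. */
	return kern_register_coredump_helper(KERN_COREDUMP_CONFIG_VERSION,
	           "userspace task", &cb, uccontext, 0, 0, 0, 0);
}
#endif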