/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kdp/processor_core.h>
#include <kdp/kdp_core.h>
#include <kdp/core_notes.h>
#include <kdp/kdp_common.h>
#include <vm/vm_map_xnu.h>

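/*
 * Preflight a userspace coredump: refcon carries the
 * kern_userspace_coredump_context for the target task. Refuse to dump if
 * the task is missing, inactive, or holds locks that make it unsafe to
 * walk its threads or VM map from the debugger/panic context.
 */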
kern_return_t
user_dump_init(void *refcon, void *context)
{
	kern_return_t err = KERN_NODE_DOWN;
	struct kern_userspace_coredump_context * uccontext = refcon;

	if (!uccontext) {
		kern_coredump_log(context, "%s: context allocation failure\n", __func__);
		goto finish;
	}

	if (!uccontext->task) {
		kern_coredump_log(context, "%s: no task is set\n", __func__);
		goto finish;
	}

	// Skip inactive tasks
	if (!uccontext->task->active) {
		kern_coredump_log(context, "%s: skipping inactive task\n", __func__);
		goto finish;
	}

	// If task is locked, it is unsafe to iterate over its threads
	if (kdp_task_is_locked(uccontext->task)) {
		kern_coredump_log(context, "%s: skipping locked task\n", __func__);
		goto finish;
	}

	// If vm map is locked exclusively, it is unsafe to traverse vm mappings
	if (kdp_vm_map_is_acquired_exclusive(uccontext->task->map)) {
		kern_coredump_log(context, "%s: skipping task with locked vm map\n", __func__);
		goto finish;
	}

	err = KERN_SUCCESS;

finish:
	return err;
}

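/*
 * Accumulator filled in by the preflight traversal below: counts the
 * dumpable VM regions and the total bytes they span.
 */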
struct user_dump_preflight_context {
	struct kern_userspace_coredump_context * uccontext;
	uint32_t region_count;
	uint64_t dumpable_bytes;
};

static kern_return_t
user_dump_map_traverse_preflight_callback(vm_offset_t start, vm_offset_t end, void *context)
{
	struct user_dump_preflight_context *udc_preflight = context;

	assert(start < end);

	udc_preflight->region_count++;
	udc_preflight->dumpable_bytes += (end - start);

	return KERN_SUCCESS;
}

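/*
 * Summary pass: walk the task's mappings to count regions and dumpable
 * bytes, query the per-thread register state size, then report the totals
 * through the summary callback.
 */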
kern_return_t
user_dump_save_summary(void *refcon, core_save_summary_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	struct user_dump_preflight_context udc_preflight = {.uccontext = uccontext, .region_count = 0, .dumpable_bytes = 0};
	uint64_t thread_state_size = 0, thread_count = 0;
	kern_return_t ret;

	ret = kdp_traverse_mappings(uccontext->task,
	    KDP_FAULT_FLAGS_ENABLE_FAULTING,
	    KDP_TRAVERSE_MAPPINGS_FLAGS_NONE,
	    user_dump_map_traverse_preflight_callback,
	    &udc_preflight);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s: vm map traversal failed: %d\n", __func__, ret);
		return ret;
	}

	kern_collect_userth_state_size(uccontext->task, &thread_count, &thread_state_size);
	return callback(udc_preflight.region_count, udc_preflight.dumpable_bytes,
	           thread_count, thread_state_size, 0, context);
}

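/*
 * Context and traversal callback used to forward each VM region's
 * [start, end) range to the segment-description callback.
 */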
struct user_dump_send_seg_desc_context {
	core_save_segment_descriptions_cb callback;
	void *context;
};

static kern_return_t
user_dump_map_traverse_send_segdesc_callback(vm_offset_t start, vm_offset_t end, void *context)
{
	struct user_dump_send_seg_desc_context *uds_context = context;

	assert(start < end);

	uint64_t seg_start = (uint64_t) start;
	uint64_t seg_end = (uint64_t) end;

	return uds_context->callback(seg_start, seg_end, uds_context->context);
}

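/*
 * Segment-description pass: traverse the task's map (with faulting
 * enabled) and emit one description per VM region via the callback.
 */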
kern_return_t
user_dump_save_seg_descriptions(void *refcon, core_save_segment_descriptions_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	struct user_dump_send_seg_desc_context uds_context;
	uds_context.callback = callback;
	uds_context.context = context;
	kern_return_t ret = kdp_traverse_mappings(uccontext->task,
	    KDP_FAULT_FLAGS_ENABLE_FAULTING,
	    KDP_TRAVERSE_MAPPINGS_FLAGS_NONE,
	    user_dump_map_traverse_send_segdesc_callback,
	    &uds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s: vm map traversal failed: %d\n", __func__, ret);
		return ret;
	}
	return KERN_SUCCESS;
}

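/*
 * Thread-state pass: buf is a scratch buffer large enough for one thread's
 * saved user state (as sized by kern_collect_userth_state_size); fill it
 * for each thread in the task and hand it to the callback.
 */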
kern_return_t
user_dump_save_thread_state(void *refcon, void *buf, core_save_thread_state_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	uint64_t thread_state_size = 0, thread_count = 0;
	thread_t thread = THREAD_NULL;

	kern_collect_userth_state_size(uccontext->task, &thread_count, &thread_state_size);
	queue_iterate(&uccontext->task->threads, thread, thread_t, task_threads) {
		kern_collect_userth_state(uccontext->task, thread, buf, thread_state_size);

		kern_return_t ret = callback(buf, context);
		if (ret != KERN_SUCCESS) {
			return ret;
		}
	}

	return KERN_SUCCESS;
}

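/*
 * Software-version pass: report dyld's load address, UUID and the log2 of
 * the task page size, recovered via kdp_task_dyld_info with faulting
 * enabled.
 */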
kern_return_t
user_dump_save_sw_vers_detail(void *refcon, core_save_sw_vers_detail_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	uint64_t dyld_load_address;
	uuid_t dyld_uuid;
	size_t task_page_size;

	/*
	 * For user coredumps we want the dyld UUID to be stored in the main bin spec LC_NOTE
	 */
	kern_return_t ret = kdp_task_dyld_info(uccontext->task, KDP_FAULT_FLAGS_ENABLE_FAULTING, &dyld_load_address, dyld_uuid, &task_page_size);
	if (ret != KERN_SUCCESS) {
		return ret;
	}
	return callback(dyld_load_address, dyld_uuid, bit_log2(task_page_size), context);
}

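/*
 * Context and traversal callback for the data pass: regions arrive as
 * physical ranges, are mapped to a kernel virtual address with phystokv(),
 * and streamed out through the segment-data callback.
 */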
struct user_dump_send_segdata_context {
	core_save_segment_data_cb callback;
	void *context;
};

static kern_return_t
user_dump_map_traverse_send_segdata_callback(vm_offset_t physstart, vm_offset_t physend, void * context)
{
	struct user_dump_send_segdata_context * uds_context = context;

	assert(physstart < physend);

	void * vphysstart = (void *) phystokv(physstart);
	return uds_context->callback(vphysstart, (uint64_t)((uintptr_t)physend - (uintptr_t)physstart), uds_context->context);
}

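/*
 * Segment-data pass: traverse the mappings with
 * KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL so the callback above receives
 * physical ranges, and stream each region's contents into the core.
 */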
kern_return_t
user_dump_save_segment_data(void *refcon, core_save_segment_data_cb callback, void *context)
{
	struct kern_userspace_coredump_context * uccontext = refcon;
	struct user_dump_send_segdata_context uds_context = {.callback = callback, .context = context};
	kern_return_t ret = kdp_traverse_mappings(uccontext->task,
	    KDP_FAULT_FLAGS_ENABLE_FAULTING,
	    KDP_TRAVERSE_MAPPINGS_FLAGS_PHYSICAL,
	    user_dump_map_traverse_send_segdata_callback,
	    &uds_context);
	if (ret != KERN_SUCCESS) {
		kern_coredump_log(context, "%s: vm map traversal failed: %d\n", __func__, ret);
		return ret;
	}

	return KERN_SUCCESS;
}


/*
 * Add an LC_NOTE to the core to indicate that it was created during a kernel panic.
 */

extern char     kernel_uuid_string[];

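/*
 * Two LC_NOTE payloads are emitted per user core: a panic-context note
 * carrying the kernel UUID string, and an addressable-bits note describing
 * the task's user virtual-address width.
 */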
kern_return_t
user_dump_save_note_summary(void *refcon __unused, core_save_note_summary_cb callback, void *context)
{
	return callback(2 /* two LC_NOTE */, sizeof(panic_context_note_t) + sizeof(addrable_bits_note_t), context);
}

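/*
 * Describe both notes (data owner name and payload size) so the core
 * writer can reserve space for them.
 */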
kern_return_t
user_dump_save_note_descriptions(void *refcon __unused, core_save_note_descriptions_cb callback, void *context)
{
	callback(PANIC_CONTEXT_DATA_OWNER, sizeof(panic_context_note_t), context);
	return callback(ADDRABLE_BITS_DATA_OWNER, sizeof(addrable_bits_note_t), context);
}

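/*
 * Emit the note payloads: the panic-context note with the kernel UUID
 * string, then the addressable-bits note derived from the task's pmap.
 */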
kern_return_t
user_dump_save_note_data(void *refcon, core_save_note_data_cb callback, void *context)
{
	panic_context_note_t note;
	strlcpy(&note.kernel_uuid_string[0], kernel_uuid_string, sizeof(uuid_string_t));

	callback(&note, sizeof(panic_context_note_t), context);

	struct kern_userspace_coredump_context *ucontext = refcon;
	addrable_bits_note_t note_ab = {
		.version = ADDRABLE_BITS_VER,
		.addressing_bits = pmap_user_va_bits(get_task_pmap(ucontext->task)),
		.unused = 0
	};

	return callback(&note_ab, sizeof(addrable_bits_note_t), context);
}
263