/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if CONFIG_EXCLAVES

#include <kern/exclaves_debug.h>
#include <kern/exclaves_inspection.h>
#include <kern/exclaves_stackshot.h>
#include <kern/exclaves_test_stackshot.h>
#include <kern/exclaves_boot.h>
#include <kern/exclaves.tightbeam.h>
#include <mach/exclaves_l4.h>
#include <vm/pmap.h>

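/*
 * Threads are inspected in batches of at most EXCLAVES_STACKSHOT_BATCH_SIZE
 * scids per exclaves call; results come back through a shared memory segment
 * of EXCLAVES_STACKSHOT_BUFFER_SIZE bytes.
 */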
#define EXCLAVES_STACKSHOT_BATCH_SIZE 32
#define EXCLAVES_STACKSHOT_BUFFER_SIZE (16 * PAGE_SIZE)

#include "exclaves_resource.h"

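/* Tightbeam endpoint of the stackshot service in the kernel domain. */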
#define EXCLAVES_ID_STACKSHOT_SERVER_EP \
    (exclaves_service_lookup(EXCLAVES_DOMAIN_KERNEL, \
    "com.apple.service.Stackshot"))

static _Atomic bool exclaves_inspection_initialized;

/*
 * Exclaves may provide the full stackshot server (service Taker) or the
 * redacted stackshot server (service RedactedTaker).
 */
static struct {
    stackshot_stackshotservervariant_s variant;
    union {
        stackshot_redactedtaker_s redacted;
        stackshot_taker_s internal;
    } conn;
} exclaves_stackshot_client;

static uint8_t exclaves_stackshot_buffer[EXCLAVES_STACKSHOT_BUFFER_SIZE];
static integer_t exclaves_collect_priority = MAXPRI_KERNEL;
static thread_t exclaves_collection_thread;
static uint64_t scid_list[EXCLAVES_STACKSHOT_BATCH_SIZE];
static ctid_t ctid_list[EXCLAVES_STACKSHOT_BATCH_SIZE];
static size_t scid_list_count;
bool exclaves_stackshot_raw_addresses;
bool exclaves_stackshot_all_address_spaces;
exclaves_resource_t *stackshot_sharedmem_resource;
exclaves_panic_ss_status_t exclaves_panic_ss_status = EXCLAVES_PANIC_STACKSHOT_UNKNOWN;

static void *exclaves_collect_event = NULL;

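/*
 * Set by the collection thread once it has allocated its IPC buffer;
 * guarded by exclaves_collect_init_mtx.
 */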
static uint8_t exclaves_collect_thread_ready = 0;

queue_head_t exclaves_inspection_queue_stackshot;
queue_head_t exclaves_inspection_queue_kperf;

static LCK_GRP_DECLARE(exclaves_inspection_lck_grp, "exclaves_inspection_lock");
LCK_MTX_DECLARE(exclaves_collect_mtx, &exclaves_inspection_lck_grp);
// Guards initialization to ensure nothing tries to collect before all threads/allocations/etc. are done
LCK_MTX_DECLARE(exclaves_collect_init_mtx, &exclaves_inspection_lck_grp);

static void exclaves_collect_threads_thread(void *arg, wait_result_t __unused wr);

extern kern_return_t
stackshot_exclaves_process_result(kern_return_t collect_kr, const stackshot_stackshotresult_s *result, bool want_raw_addresses);

extern __attribute__((noinline))
void kperf_thread_exclaves_ast_handler(thread_t thread, const stackshot_stackshotentry_s * _Nonnull entry);

typedef kern_return_t (*exclaves_inspection_process_fn)(kern_return_t collect_kr, const stackshot_stackshotresult_s *data, bool want_raw_addresses);


/* Populate the provided buffers with the scids and ctids of threads dequeued from the tail of the wait list. */
static size_t
prepare_scid_list_stackshot(queue_t wl, uint64_t *pscid_list, ctid_t *pctid_list, uint64_t max_threads)
{
    thread_t thread = NULL;
    size_t count = 0;

    lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

    for (count = 0; count < max_threads; ++count) {
        thread = qe_dequeue_tail(wl, struct thread, th_exclaves_inspection_queue_stackshot);
        if (thread == NULL) {
            break;
        }
        pscid_list[count] = thread->th_exclaves_ipc_ctx.scid;
        pctid_list[count] = thread_get_ctid(thread);
    }

    return count;
}

static size_t
prepare_scid_list_kperf(queue_t wl, uint64_t *pscid_list, ctid_t *pctid_list, uint64_t max_threads)
{
    thread_t thread = NULL;
    size_t count = 0;

    lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

    for (count = 0; count < max_threads; ++count) {
        thread = qe_dequeue_tail(wl, struct thread, th_exclaves_inspection_queue_kperf);
        if (thread == NULL) {
            break;
        }
        pscid_list[count] = thread->th_exclaves_ipc_ctx.scid;
        pctid_list[count] = thread_get_ctid(thread);
    }

    return count;
}

/* Clear the inspection flag on each listed pending thread, allowing it to run. */
static void
clear_pending_threads_stackshot(ctid_t *ctids, size_t count, thread_exclaves_inspection_flags_t flag)
{
    size_t i;
    thread_t thread;

    for (i = 0; i < count; ++i) {
        thread = ctid_get_thread(ctids[i]);
        ctids[i] = 0;
        assert(thread);

        os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
        wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_stackshot, THREAD_AWAKENED);
        thread_deallocate_safe(thread);
    }
}

static void
clear_pending_threads_kperf(ctid_t *ctids, size_t count, thread_exclaves_inspection_flags_t flag)
{
    size_t i;
    thread_t thread;

    for (i = 0; i < count; ++i) {
        thread = ctid_get_thread(ctids[i]);
        ctids[i] = 0;
        assert(thread);

        os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
        wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_kperf, THREAD_AWAKENED);
        thread_deallocate_safe(thread);
    }
}

static void
clear_stackshot_queue(thread_exclaves_inspection_flags_t flag)
{
    thread_t thread;

    lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

    while (!queue_empty(&exclaves_inspection_queue_stackshot)) {
        thread = qe_dequeue_tail(&exclaves_inspection_queue_stackshot, struct thread, th_exclaves_inspection_queue_stackshot);
        assert(thread);
        os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
        wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_stackshot, THREAD_AWAKENED);
        thread_deallocate_safe(thread);
    }
}

static void
clear_kperf_queue(thread_exclaves_inspection_flags_t flag)
{
    thread_t thread;

    lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

    while (!queue_empty(&exclaves_inspection_queue_kperf)) {
        thread = qe_dequeue_tail(&exclaves_inspection_queue_kperf, struct thread, th_exclaves_inspection_queue_kperf);
        assert(thread);
        os_atomic_and(&thread->th_exclaves_inspection_state, ~flag, relaxed);
        wakeup_all_with_inheritor((event_t)&thread->th_exclaves_inspection_queue_kperf, THREAD_AWAKENED);
        thread_deallocate_safe(thread);
    }
}

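/*
 * Unmarshal a tightbeam-encoded stackshot result of output_length bytes from
 * buffer and hand it to process_fn. An output_length of zero means the
 * exclave-side data did not fit into the shared memory buffer and is treated
 * as a failure.
 */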
static kern_return_t
process_exclaves_buffer(uint8_t *buffer, size_t output_length, exclaves_inspection_process_fn process_fn, bool want_raw_addresses)
{
    __block kern_return_t error = KERN_SUCCESS;
    tb_error_t tberr = TB_ERROR_SUCCESS;

    if (output_length) {
        tberr = stackshot_stackshotresult__unmarshal(buffer, output_length, ^(stackshot_stackshotresult_s result){
            error = process_fn(KERN_SUCCESS, &result, want_raw_addresses);
            if (error != KERN_SUCCESS) {
                exclaves_debug_printf(show_errors, "exclaves stackshot: error processing stackshot result\n");
            }
        });
        if (tberr != TB_ERROR_SUCCESS) {
            exclaves_debug_printf(show_errors, "exclaves stackshot: process_exclaves_buffer could not unmarshal stackshot data 0x%x\n", tberr);
            error = KERN_FAILURE;
            goto error_exit;
        }
    } else {
        error = KERN_FAILURE;
        exclaves_debug_printf(show_errors, "exclaves stackshot: exclave stackshot data did not fit into shared memory buffer\n");
    }

error_exit:
    return error;
}

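/*
 * Run an exclaves stackshot over the scids currently staged in scid_list,
 * using whichever server variant was discovered at initialization. On
 * success, the result is copied out of the shared memory segment into
 * exclaves_stackshot_buffer and passed to process_fn.
 */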
static kern_return_t
collect_scid_list(exclaves_inspection_process_fn process_fn, bool want_raw_addresses, bool all_address_spaces)
{
    __block kern_return_t kr = KERN_SUCCESS;
    tb_error_t tberr = 0;
    scid_v_s scids = { 0 };

    scid__v_assign_unowned(&scids, scid_list, scid_list_count);

    // copy data from stackshot_sharedmem_resource to exclaves_stackshot_buffer
    void (^success_handler)(stackshot_outputlength_s);
    success_handler = ^(stackshot_outputlength_s output_length) {
        __assert_only size_t len = 0;
        char *ss_buffer = exclaves_resource_shared_memory_get_buffer(stackshot_sharedmem_resource, &len);
        assert3u(len, ==, EXCLAVES_STACKSHOT_BUFFER_SIZE);

        assert3u(output_length, <=, EXCLAVES_STACKSHOT_BUFFER_SIZE);
        memcpy(exclaves_stackshot_buffer, ss_buffer, output_length);

        kr = process_exclaves_buffer(exclaves_stackshot_buffer, (size_t)output_length, process_fn, want_raw_addresses);
    };

    if (exclaves_stackshot_client.variant == STACKSHOT_STACKSHOTSERVERVARIANT_INTERNAL) {
        tberr = stackshot_taker_runstackshot(&exclaves_stackshot_client.conn.internal, &scids, want_raw_addresses, all_address_spaces, ^(stackshot_taker_runstackshot__result_s res) {
            stackshot_outputlength_s *p_len = stackshot_taker_runstackshot__result_get_success(&res);
            if (p_len) {
                success_handler(*p_len);
            } else {
                stackshot_stackshotserverfailure_s *p_failure = stackshot_taker_runstackshot__result_get_failure(&res);
                if (p_failure) {
                    exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_taker_runstackshot failure %u\n", *p_failure);
                } else {
                    exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_taker_runstackshot unknown failure\n");
                }
            }
        });
    } else {
        tberr = stackshot_redactedtaker_runstackshotredacted(&exclaves_stackshot_client.conn.redacted, &scids, all_address_spaces, ^(stackshot_redactedtaker_runstackshotredacted__result_s res){
            stackshot_outputlength_s *p_len = stackshot_redactedtaker_runstackshotredacted__result_get_success(&res);
            if (p_len) {
                success_handler(*p_len);
            } else {
                stackshot_stackshotserverfailure_s *p_failure = stackshot_redactedtaker_runstackshotredacted__result_get_failure(&res);
                if (p_failure) {
                    exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_redactedtaker_runstackshotredacted failure %u\n", *p_failure);
                } else {
                    exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_redactedtaker_runstackshotredacted unknown failure\n");
                }
            }
        });
    }

    if (tberr != TB_ERROR_SUCCESS) {
        exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_(redacted)taker_runstackshot error 0x%x\n", tberr);
        kr = KERN_FAILURE;
        goto error_exit;
    }

error_exit:
    exclaves_debug_printf(show_progress, "exclaves stackshot: collection done with result %d\n", kr);
    return kr;
}

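/*
 * Deliver each collected entry to the kperf AST handler of the corresponding
 * thread. Entries are assumed to parallel the submitted scid/ctid lists,
 * hence the assert against scid_list_count below.
 */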
static kern_return_t
complete_kperf_ast(kern_return_t collect_kr, const stackshot_stackshotresult_s *result, __unused bool want_raw_addresses)
{
    if (collect_kr != KERN_SUCCESS) {
        return collect_kr;
    }

    stackshot_stackshotentry__v_visit(&result->stackshotentries, ^(size_t i, const stackshot_stackshotentry_s * _Nonnull entry) {
        assert(i < scid_list_count);
        thread_t thread = ctid_get_thread(ctid_list[i]);
        assert(thread);
        kperf_thread_exclaves_ast_handler(thread, entry);
    });

    return KERN_SUCCESS;
}

/*
 * Kernel thread that, when signalled via exclaves_collect_event, collects
 * data on the current exclaves activity of the threads registered on its
 * wait lists.
 */
__attribute__((noreturn))
static void
exclaves_collect_threads_thread(void __unused *arg, wait_result_t __unused wr)
{
    kern_return_t kr = KERN_SUCCESS;

    kr = exclaves_allocate_ipc_buffer(NULL);
    if (kr != KERN_SUCCESS) {
        panic("exclaves stackshot: failed to allocate collect ipcb: %d", kr);
    }

    os_atomic_store(&current_thread()->th_exclaves_inspection_state, TH_EXCLAVES_INSPECTION_NOINSPECT, relaxed);
    lck_mtx_lock(&exclaves_collect_init_mtx);
    exclaves_collect_thread_ready = true;
    wakeup_all_with_inheritor(&exclaves_collect_thread_ready, THREAD_AWAKENED);
    lck_mtx_unlock(&exclaves_collect_init_mtx);

    lck_mtx_lock(&exclaves_collect_mtx);

    for (;;) {
        while (queue_empty(&exclaves_inspection_queue_stackshot) && queue_empty(&exclaves_inspection_queue_kperf)) {
            lck_mtx_sleep(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT, (event_t)&exclaves_collect_event, THREAD_UNINT);
        }

        if (!queue_empty(&exclaves_inspection_queue_stackshot)) {
            // only this thread should manipulate the scid_list
            scid_list_count = prepare_scid_list_stackshot(&exclaves_inspection_queue_stackshot, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
            while (scid_list_count) {
                lck_mtx_unlock(&exclaves_collect_mtx);

                kr = collect_scid_list(stackshot_exclaves_process_result, exclaves_stackshot_raw_addresses, exclaves_stackshot_all_address_spaces);
                lck_mtx_lock(&exclaves_collect_mtx);
                clear_pending_threads_stackshot(ctid_list, scid_list_count, TH_EXCLAVES_INSPECTION_STACKSHOT);
                if (kr != KERN_SUCCESS) {
                    goto stackshot_error;
                }

                scid_list_count = prepare_scid_list_stackshot(&exclaves_inspection_queue_stackshot, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
            }

stackshot_error:
            if (!queue_empty(&exclaves_inspection_queue_stackshot)) {
                clear_stackshot_queue(TH_EXCLAVES_INSPECTION_STACKSHOT);
            }
            stackshot_exclaves_process_result(kr, NULL, true);
            wakeup_all_with_inheritor(&exclaves_inspection_queue_stackshot, THREAD_AWAKENED);
        }

        if (!queue_empty(&exclaves_inspection_queue_kperf)) {
            scid_list_count = prepare_scid_list_kperf(&exclaves_inspection_queue_kperf, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
            while (scid_list_count) {
                lck_mtx_unlock(&exclaves_collect_mtx);

                kr = collect_scid_list(complete_kperf_ast, false, false);
                lck_mtx_lock(&exclaves_collect_mtx);
                clear_pending_threads_kperf(ctid_list, scid_list_count, TH_EXCLAVES_INSPECTION_KPERF);
                if (kr != KERN_SUCCESS) {
                    goto kperf_error;
                }

                scid_list_count = prepare_scid_list_kperf(&exclaves_inspection_queue_kperf, scid_list, ctid_list, EXCLAVES_STACKSHOT_BATCH_SIZE);
            }
kperf_error:
            if (!queue_empty(&exclaves_inspection_queue_kperf)) {
                clear_kperf_queue(TH_EXCLAVES_INSPECTION_KPERF);
            }
        }
    }
}

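/*
 * Wake the collection thread so it drains the inspection queues. The caller
 * must hold exclaves_collect_mtx and should already have queued the threads
 * it wants inspected.
 */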
void
exclaves_inspection_begin_collecting(void)
{
    lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

    thread_wakeup_thread((event_t)&exclaves_collect_event, exclaves_collection_thread);
}

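/*
 * Block until the given inspection queue has been drained by the collection
 * thread. The caller must hold exclaves_collect_mtx.
 */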
void
exclaves_inspection_wait_complete(queue_t queue)
{
    lck_mtx_assert(&exclaves_collect_mtx, LCK_MTX_ASSERT_OWNED);

    while (!queue_empty(queue)) {
        lck_mtx_sleep_with_inheritor(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT, (event_t)queue, exclaves_collection_thread, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
    }
}
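
/*
 * A minimal sketch of the intended client flow, for illustration only (the
 * real stackshot/kperf call sites live elsewhere and may differ in detail):
 *
 *     lck_mtx_lock(&exclaves_collect_mtx);
 *     exclaves_inspection_queue_add(&exclaves_inspection_queue_stackshot,
 *         &thread->th_exclaves_inspection_queue_stackshot);
 *     thread_reference(thread);
 *     exclaves_inspection_begin_collecting();
 *     exclaves_inspection_wait_complete(&exclaves_inspection_queue_stackshot);
 *     lck_mtx_unlock(&exclaves_collect_mtx);
 */
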
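/*
 * Boot task: connect to the stackshot service, discover which server variant
 * is present, map the shared memory result segment, and start the collection
 * thread. Waits for the collection thread to signal readiness before marking
 * inspection as initialized.
 */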
static kern_return_t
exclaves_inspection_init(void)
{
    __block kern_return_t kr = KERN_SUCCESS;
    tb_error_t tberr = 0;
    tb_endpoint_t tb_endpoint = { 0 };

    assert(!os_atomic_load(&exclaves_inspection_initialized, relaxed));

    /*
     * If there's no stackshot service available, just return.
     */
    if (EXCLAVES_ID_STACKSHOT_SERVER_EP == EXCLAVES_INVALID_ID) {
        exclaves_requirement_assert(EXCLAVES_R_STACKSHOT,
            "stackshot server not found");
        return KERN_SUCCESS;
    }

    queue_init(&exclaves_inspection_queue_stackshot);
    queue_init(&exclaves_inspection_queue_kperf);

    tb_endpoint = tb_endpoint_create_with_value(TB_TRANSPORT_TYPE_XNU, EXCLAVES_ID_STACKSHOT_SERVER_EP, TB_ENDPOINT_OPTIONS_NONE);

    tberr = stackshot_redactedtaker__init(&exclaves_stackshot_client.conn.redacted, tb_endpoint);
    if (tberr != TB_ERROR_SUCCESS) {
        exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_redactedtaker__init error 0x%x\n", tberr);
        return KERN_FAILURE;
    }

    /* This will initialize whichever version of the stackshot server is available. */
    tberr = stackshot_redactedtaker_initialize(&exclaves_stackshot_client.conn.redacted, ^(stackshot_stackshotservervariant_s variant) {
        exclaves_stackshot_client.variant = variant;
    });

    if (tberr != TB_ERROR_SUCCESS) {
        exclaves_debug_printf(show_errors, "exclaves stackshot: stackshot_redactedtaker_initialize error 0x%x\n", tberr);
        return KERN_FAILURE;
    }

    if (exclaves_stackshot_client.variant == STACKSHOT_STACKSHOTSERVERVARIANT_INTERNAL) {
        tb_endpoint = tb_endpoint_create_with_value(TB_TRANSPORT_TYPE_XNU, EXCLAVES_ID_STACKSHOT_SERVER_EP, TB_ENDPOINT_OPTIONS_NONE);
        tberr = stackshot_taker__init(&exclaves_stackshot_client.conn.internal, tb_endpoint);
        if (tberr != TB_ERROR_SUCCESS) {
            panic("exclaves stackshot: stackshot_taker__init error 0x%x\n", tberr);
        }
    }

    // initialize sharedmemv2 resource
    const char *v2_seg_name = "com.apple.sharedmem.stackshotserver";
    kr = exclaves_resource_shared_memory_map(
        EXCLAVES_DOMAIN_KERNEL, v2_seg_name,
        EXCLAVES_STACKSHOT_BUFFER_SIZE,
        EXCLAVES_BUFFER_PERM_READ,
        &stackshot_sharedmem_resource);

    if (kr != KERN_SUCCESS) {
        exclaves_debug_printf(show_errors,
            "exclaves_inspection_init: Cannot map shared memory segment '%s': failed with %d\n",
            v2_seg_name, kr);
        return kr;
    }

    exclaves_debug_printf(show_progress, "exclaves stackshot: exclaves inspection initialized\n");

    kr = kernel_thread_start_priority(
        exclaves_collect_threads_thread, NULL, exclaves_collect_priority, &exclaves_collection_thread);
    if (kr != KERN_SUCCESS) {
        goto error_exit;
    }
    thread_set_thread_name(exclaves_collection_thread, "exclaves-stackshot");
    thread_deallocate(exclaves_collection_thread);

    lck_mtx_lock(&exclaves_collect_init_mtx);

    while (!exclaves_collect_thread_ready) {
        lck_mtx_sleep_with_inheritor(&exclaves_collect_init_mtx, LCK_SLEEP_DEFAULT, (event_t)&exclaves_collect_thread_ready, exclaves_collection_thread, THREAD_UNINT, TIMEOUT_WAIT_FOREVER);
    }

    os_atomic_store(&exclaves_inspection_initialized, true, release);
    lck_mtx_unlock(&exclaves_collect_init_mtx);
error_exit:
    return kr;
}

EXCLAVES_BOOT_TASK(exclaves_inspection_init, EXCLAVES_BOOT_RANK_SECOND);

bool
exclaves_inspection_is_initialized(void)
{
    return os_atomic_load(&exclaves_inspection_initialized, acquire);
}

/*
 * TH_EXCLAVES_INSPECTION_STACKSHOT is set when stackshot runs in debug mode
 * and adds a thread to the wait list.
 *
 * TH_EXCLAVES_INSPECTION_STACKSHOT is cleared by the collection thread, which
 * holds exclaves_collect_mtx while doing so.
 */
void
exclaves_inspection_check_ast(void)
{
    thread_t thread = current_thread();

    assert((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_NOINSPECT) == 0);

    /* This will unblock exclaves stackshot collection */
    STACKSHOT_TESTPOINT(TP_AST);

    /* Grab the mutex to prevent cleanup just after next check */
    lck_mtx_lock(&exclaves_collect_mtx);
    while ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_STACKSHOT) != 0) {
        lck_mtx_sleep_with_inheritor(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT,
            (event_t)&thread->th_exclaves_inspection_queue_stackshot, exclaves_collection_thread,
            THREAD_UNINT, TIMEOUT_WAIT_FOREVER
            );
    }

    if ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & TH_EXCLAVES_INSPECTION_KPERF) != 0) {
        exclaves_inspection_queue_add(&exclaves_inspection_queue_kperf, &thread->th_exclaves_inspection_queue_kperf);
        thread_reference(thread);
        exclaves_inspection_begin_collecting();
        lck_mtx_sleep_with_inheritor(&exclaves_collect_mtx, LCK_SLEEP_DEFAULT,
            (event_t)&thread->th_exclaves_inspection_queue_kperf, exclaves_collection_thread,
            THREAD_UNINT, TIMEOUT_WAIT_FOREVER
            );
    }
    lck_mtx_unlock(&exclaves_collect_mtx);
}


/* this should come from somewhere in EP */
#define STACKSHOT_PANIC_MAGIC 0xdeadcafebeefbabe
typedef struct stackshot_panic_magic {
    uint64_t magic;
    uint64_t size;
} stackshot_panic_magic_t;
_Static_assert(sizeof(stackshot_panic_magic_t) == 16, "panic magic should be 16 bytes");

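/*
 * Called with the debugger active to recover an exclaves stackshot captured
 * at panic time. The exclave side marks a valid capture by placing a
 * (magic, size) record in the last 16 bytes of the shared buffer; if the
 * magic is absent, *eps is left zeroed.
 */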
void
kdp_read_panic_exclaves_stackshot(struct exclaves_panic_stackshot *eps)
{
    assert(debug_mode_active());

    *eps = (struct exclaves_panic_stackshot){ 0 };

    if (!exclaves_inspection_is_initialized()) {
        return;
    }

    /* copy the entire potential range of the buffer */
    __assert_only size_t len = 0;
    char *ss_buffer = exclaves_resource_shared_memory_get_buffer(stackshot_sharedmem_resource, &len);
    assert3u(len, ==, EXCLAVES_STACKSHOT_BUFFER_SIZE);
    memcpy(exclaves_stackshot_buffer, ss_buffer, EXCLAVES_STACKSHOT_BUFFER_SIZE);

    /* check for panic magic value in xnu's copy of the region */
    stackshot_panic_magic_t *panic_magic = __IGNORE_WCASTALIGN((stackshot_panic_magic_t *)(exclaves_stackshot_buffer + (EXCLAVES_STACKSHOT_BUFFER_SIZE - sizeof(stackshot_panic_magic_t))));
    if (panic_magic->magic != STACKSHOT_PANIC_MAGIC) {
        return;
    }

    eps->stackshot_buffer = exclaves_stackshot_buffer;
    eps->stackshot_buffer_size = panic_magic->size;
}

#endif /* CONFIG_EXCLAVES */