1 /*
2 * Copyright (c) 2007-2024 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/misc_protos.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/proc_reg.h>
39 #include <sys/random.h>
40 #if __has_feature(ptrauth_calls)
41 #include <ptrauth.h>
42 #endif
43
44 #include <libkern/coreanalytics/coreanalytics.h>
45
46
/*
 * Legacy VFPv2 register-file layout: 32 single-precision registers plus
 * the FPSCR status/control word.  Used when converting to/from the older
 * ARM_VFP_STATE thread-state flavor.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t in 32-bit words, as Mach state counts are expressed. */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
56
/*
 * Forward definitions
 */
/* Set up a forked child's register state — presumably the fork return value; defined elsewhere. */
void thread_set_child(thread_t child, int pid);
/* Release the thread's attached debug state, if any (definition below/elsewhere in this file). */
static void free_debug_state(thread_t thread);
/* Per-thread secret XORed into sigreturn token generation (see thread_generate_sigreturn_token). */
user_addr_t thread_get_sigreturn_token(thread_t thread);
/* Per-thread diversifier mixed into userland PAC discriminators. */
uint32_t thread_get_sigreturn_diversifier(thread_t thread);
64
/*
 * Maps state flavor to number of words in the state:
 * (counts are in 32-bit words, matching the *_COUNT convention of
 * sizeof(state)/sizeof(uint32_t) used for mach_msg_type_number_t)
 */
/* __private_extern__ */
unsigned int _MachineStateCount[THREAD_STATE_FLAVORS] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64_V2] = ARM_EXCEPTION_STATE64_V2_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
84
/* Zone for arm debug state allocations — presumably; defined in another file, TODO confirm. */
extern zone_t ads_zone;
86
87 #if __arm64__
88 /*
89 * Copy values from saved_state to ts64.
90 */
91 void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,arm_thread_state64_t * ts64)92 saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
93 arm_thread_state64_t * ts64)
94 {
95 uint32_t i;
96
97 assert(is_saved_state64(saved_state));
98
99 ts64->fp = get_saved_state_fp(saved_state);
100 ts64->lr = get_saved_state_lr(saved_state);
101 ts64->sp = get_saved_state_sp(saved_state);
102 ts64->pc = get_saved_state_pc(saved_state);
103 ts64->cpsr = get_saved_state_cpsr(saved_state);
104 for (i = 0; i < 29; i++) {
105 ts64->x[i] = get_saved_state_reg(saved_state, i);
106 }
107 }
108
/*
 * Copy values from ts64 to saved_state.
 *
 * For safety, CPSR is sanitized as follows:
 *
 * - ts64->cpsr.{N,Z,C,V} are copied as-is into saved_state->cpsr
 * - ts64->cpsr.M is ignored, and saved_state->cpsr.M is reset to EL0
 * - All other saved_state->cpsr bits are preserved as-is
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	/* Keep PAC-sensitive updates atomic with respect to interrupts. */
	uint64_t intr = ml_pac_safe_interrupts_disable();
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

	/* Bit classes for the CPSR sanitization described above. */
	const uint32_t CPSR_COPY_MASK = PSR64_USER_MASK;
	const uint32_t CPSR_ZERO_MASK = PSR64_MODE_MASK;
	const uint32_t CPSR_PRESERVE_MASK = ~(CPSR_COPY_MASK | CPSR_ZERO_MASK);
#if __has_feature(ptrauth_calls)
	/* BEGIN IGNORE CODESTYLE */
	/*
	 * Update CPSR within the signed thread-state manipulation window:
	 * w2 = (current cpsr & PRESERVE) | (ts64->cpsr & COPY), stored back
	 * at the SS64_CPSR offset of the saved state (x0).
	 */
	MANIPULATE_SIGNED_USER_THREAD_STATE(saved_state,
		"and	w2, w2, %w[preserve_mask]"	"\n"
		"mov	w6, %w[cpsr]"			"\n"
		"and	w6, w6, %w[copy_mask]"		"\n"
		"orr	w2, w2, w6"			"\n"
		"str	w2, [x0, %[SS64_CPSR]]"		"\n",
		[cpsr] "r"(ts64->cpsr),
		[preserve_mask] "i"(CPSR_PRESERVE_MASK),
		[copy_mask] "i"(CPSR_COPY_MASK)
	);
	/* END IGNORE CODESTYLE */
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#else
	uint32_t new_cpsr = get_saved_state_cpsr(saved_state);
	new_cpsr &= CPSR_PRESERVE_MASK;
	new_cpsr |= (ts64->cpsr & CPSR_COPY_MASK);
	set_user_saved_state_cpsr(saved_state, new_cpsr);
#endif /* __has_feature(ptrauth_calls) */
	set_saved_state_fp(saved_state, ts64->fp);
	set_user_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_user_saved_state_pc(saved_state, ts64->pc);
	/* x0..x28; fp/lr/sp/pc handled individually above. */
	for (i = 0; i < 29; i++) {
		set_user_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_pac_safe_interrupts_restore(intr);
#endif /* __has_feature(ptrauth_calls) */
}
168
169 #endif /* __arm64__ */
170
171 static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)172 handle_get_arm32_thread_state(thread_state_t tstate,
173 mach_msg_type_number_t * count,
174 const arm_saved_state_t * saved_state)
175 {
176 if (*count < ARM_THREAD_STATE32_COUNT) {
177 return KERN_INVALID_ARGUMENT;
178 }
179 if (!is_saved_state32(saved_state)) {
180 return KERN_INVALID_ARGUMENT;
181 }
182
183 (void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
184 *count = ARM_THREAD_STATE32_COUNT;
185 return KERN_SUCCESS;
186 }
187
188 static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)189 handle_get_arm64_thread_state(thread_state_t tstate,
190 mach_msg_type_number_t * count,
191 const arm_saved_state_t * saved_state)
192 {
193 if (*count < ARM_THREAD_STATE64_COUNT) {
194 return KERN_INVALID_ARGUMENT;
195 }
196 if (!is_saved_state64(saved_state)) {
197 return KERN_INVALID_ARGUMENT;
198 }
199
200 (void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
201 *count = ARM_THREAD_STATE64_COUNT;
202 return KERN_SUCCESS;
203 }
204
205
206 static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)207 handle_get_arm_thread_state(thread_state_t tstate,
208 mach_msg_type_number_t * count,
209 const arm_saved_state_t * saved_state)
210 {
211 /* In an arm64 world, this flavor can be used to retrieve the thread
212 * state of a 32-bit or 64-bit thread into a unified structure, but we
213 * need to support legacy clients who are only aware of 32-bit, so
214 * check the count to see what the client is expecting.
215 */
216 if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
217 return handle_get_arm32_thread_state(tstate, count, saved_state);
218 }
219
220 arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
221 bzero(unified_state, sizeof(*unified_state));
222 #if __arm64__
223 if (is_saved_state64(saved_state)) {
224 unified_state->ash.flavor = ARM_THREAD_STATE64;
225 unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
226 (void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
227 } else
228 #endif
229 {
230 unified_state->ash.flavor = ARM_THREAD_STATE32;
231 unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
232 (void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
233 }
234 *count = ARM_UNIFIED_THREAD_STATE_COUNT;
235 return KERN_SUCCESS;
236 }
237
238
239 static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)240 handle_set_arm32_thread_state(const thread_state_t tstate,
241 mach_msg_type_number_t count,
242 arm_saved_state_t * saved_state)
243 {
244 if (count != ARM_THREAD_STATE32_COUNT) {
245 return KERN_INVALID_ARGUMENT;
246 }
247
248 (void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
249 return KERN_SUCCESS;
250 }
251
252 static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)253 handle_set_arm64_thread_state(const thread_state_t tstate,
254 mach_msg_type_number_t count,
255 arm_saved_state_t * saved_state)
256 {
257 if (count != ARM_THREAD_STATE64_COUNT) {
258 return KERN_INVALID_ARGUMENT;
259 }
260
261 (void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
262 return KERN_SUCCESS;
263 }
264
265
266 static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)267 handle_set_arm_thread_state(const thread_state_t tstate,
268 mach_msg_type_number_t count,
269 arm_saved_state_t * saved_state)
270 {
271 /* In an arm64 world, this flavor can be used to set the thread state of a
272 * 32-bit or 64-bit thread from a unified structure, but we need to support
273 * legacy clients who are only aware of 32-bit, so check the count to see
274 * what the client is expecting.
275 */
276 if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
277 if (!is_saved_state32(saved_state)) {
278 return KERN_INVALID_ARGUMENT;
279 }
280 return handle_set_arm32_thread_state(tstate, count, saved_state);
281 }
282
283 const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
284 #if __arm64__
285 if (is_thread_state64(unified_state)) {
286 if (!is_saved_state64(saved_state)) {
287 return KERN_INVALID_ARGUMENT;
288 }
289 (void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
290 } else
291 #endif
292 {
293 if (!is_saved_state32(saved_state)) {
294 return KERN_INVALID_ARGUMENT;
295 }
296 (void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
297 }
298
299 return KERN_SUCCESS;
300 }
301
302
#if __has_feature(ptrauth_calls)

/*
 * Derive a 32-bit sigreturn token for a (signed) pc/lr value.
 *
 * The pointer is XORed with the thread's sigreturn secret, then signed
 * with the process-independent data key under the thread's jop_pid, and
 * the upper 32 bits of the result (where the PAC signature bits live)
 * become the token.
 */
static inline uint32_t
thread_generate_sigreturn_token(
	void *ptr,
	thread_t thread)
{
	user64_addr_t token = (user64_addr_t)ptr;
	/* Mix in the per-thread secret so tokens don't transfer between threads. */
	token ^= (user64_addr_t)thread_get_sigreturn_token(thread);
	token = (user64_addr_t)pmap_sign_user_ptr((void*)token,
	    ptrauth_key_process_independent_data, ptrauth_string_discriminator("nonce"),
	    thread->machine.jop_pid);
	/* Keep only the high half, which carries the signature. */
	token >>= 32;
	return (uint32_t)token;
}
#endif //__has_feature(ptrauth_calls)
319
/*
 * Translate thread state arguments to userspace representation
 *
 * On ptrauth targets this signs pc/lr/sp/fp with user-space keys so the
 * returned state can later round-trip through
 * machine_thread_state_convert_from_user.  Flags in ts64->flags record
 * whether pc/lr are kernel-signed, whether lr was already IB-signed, and
 * the userland diversifier used in the discriminator blend.
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	thread_set_status_flags_t tssf_flags)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;
	/* Caller-requested behaviors, decoded from tssf_flags. */
	bool preserve_flags = !!(tssf_flags & TSSF_PRESERVE_FLAGS);
	bool stash_sigreturn_token = !!(tssf_flags & TSSF_STASH_SIGRETURN_TOKEN);
	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);
	bool task_div = !!(tssf_flags & TSSF_TASK_USER_DIV);
	uint32_t old_flags;
	bool kernel_signed_pc = true;
	bool kernel_signed_lr = true;
	uint32_t userland_diversifier = 0;

	/* Only 64-bit flavors carry pointers that need signing. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread()) ||
	    (thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) || !thread_is_64bit_addr(thread)
	    ) {
		/* Either side is JOP-disabled or 32-bit: hand back unsigned state. */
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	old_flags = ts64->flags;
	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (preserve_flags) {
		/* Preserve is incompatible with generating a fresh diversifier. */
		assert(random_div == false);
		assert(thread_div == false);

		/* Restore the diversifier and other opaque flags */
		ts64->flags |= (old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
		userland_diversifier = old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC)) {
			kernel_signed_pc = false;
		}
		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR)) {
			kernel_signed_lr = false;
		}
	} else {
		/* Set a non zero userland diversifier */
		if (random_div || task_div) {
			/* Still use random div in case of task_div to avoid leaking the secret key */
			do {
				read_random(&userland_diversifier, sizeof(userland_diversifier));
				userland_diversifier &=
				    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
			} while (userland_diversifier == 0);
		} else if (thread_div) {
			userland_diversifier = thread_get_sigreturn_diversifier(thread) &
			    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
		}
		ts64->flags |= userland_diversifier;
	}

	if (kernel_signed_pc) {
		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC;
	}

	if (kernel_signed_lr) {
		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR;
	}


	if (ts64->pc) {
		/* Kernel-signed values use the plain "pc" discriminator; user-signed
		 * values blend in the userland diversifier. */
		uint64_t discriminator = ptrauth_string_discriminator("pc");
		if (!kernel_signed_pc && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
			    ptrauth_string_discriminator("pc"));
		}

		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		uint64_t discriminator = ptrauth_string_discriminator("lr");
		if (!kernel_signed_lr && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
			    ptrauth_string_discriminator("lr"));
		}

		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	/* Stash the sigreturn token */
	if (stash_sigreturn_token) {
		if (kernel_signed_pc) {
			uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK);
		}

		if (kernel_signed_lr) {
			uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK);
		}
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count; (void)tssf_flags;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
487
#if __has_feature(ptrauth_calls)
extern char * proc_name_address(void *p);

/* Telemetry event reported when kernel-signed pc/lr in a proposed thread
 * state don't match the previous state. */
CA_EVENT(pac_thread_state_exception_event,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);

/*
 * Compare a proposed thread state against the previous one and send a
 * CoreAnalytics event when a kernel-signed pointer was altered:
 *  - pc flagged KERNEL_SIGNED_PC but pc differs from old pc, or
 *  - lr flagged KERNEL_SIGNED_LR (and not IB-signed) and either lr
 *    differs from old lr or the old lr was IB-signed.
 */
static void
machine_thread_state_check_pac_state(
	arm_thread_state64_t *ts64,
	arm_thread_state64_t *old_ts64)
{
	bool send_event = false;
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);
	char *proc_name = (char *) "unknown";

	if (((ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC) &&
	    ts64->pc != old_ts64->pc) || (!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
	    (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR) && (ts64->lr != old_ts64->lr ||
	    (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)))) {
		send_event = true;
	}

	if (!send_event) {
		return;
	}

	proc_name = proc_name_address(proc);
	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_exception_event);
	CA_EVENT_TYPE(pac_thread_state_exception_event) * pexc_event = ca_event->data;
	strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
	CA_EVENT_SEND(ca_event);
}
521
/* Telemetry event reported when a sigreturn token check fails. */
CA_EVENT(pac_thread_state_sigreturn_event,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);

/*
 * Validate the sigreturn tokens stashed in ts64 against freshly computed
 * tokens for the current pc/lr.  Returns true when every applicable token
 * matches; otherwise sends a CoreAnalytics event and returns false.
 * Tokens are only checked for pointers flagged as kernel-signed.
 */
static bool
machine_thread_state_check_sigreturn_token(
	arm_thread_state64_t *ts64,
	thread_t thread)
{
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);
	char *proc_name = (char *) "unknown";
	bool token_matched = true;
	bool kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
	bool kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);

	if (kernel_signed_pc) {
		/* Compute the sigreturn token */
		uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK)) {
			token_matched = false;
		}
	}

	if (kernel_signed_lr) {
		/* Compute the sigreturn token */
		uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK)) {
			token_matched = false;
		}
	}

	if (token_matched) {
		return true;
	}

	/* Mismatch: report which process supplied the bad state. */
	proc_name = proc_name_address(proc);
	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_sigreturn_event);
	CA_EVENT_TYPE(pac_thread_state_sigreturn_event) * psig_event = ca_event->data;
	strlcpy(psig_event->proc_name, proc_name, CA_PROCNAME_LEN);
	CA_EVENT_SEND(ca_event);
	return false;
}
566
567 #endif
568
/*
 * Translate thread state arguments from userspace representation
 *
 * On ptrauth targets this authenticates (and strips) the user-signed
 * pc/lr/sp/fp produced by machine_thread_state_convert_to_user, enforcing
 * the kernel-signed / IB-signed / diversifier flags recorded in
 * ts64->flags.  An invalid signature poisons the pointer via ptrauth, so
 * the resulting thread state is unusable rather than exploitable.
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count,
	thread_state_t old_tstate,
	mach_msg_type_number_t old_count,
	thread_set_status_flags_t tssf_flags)
{
	arm_thread_state64_t *ts64;
	arm_thread_state64_t *old_ts64 = NULL;
	bool only_set_pc = !!(tssf_flags & TSSF_ONLY_PC);

	/* Only 64-bit flavors carry signed pointers; anything else passes through. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);

		arm_unified_thread_state_t *old_unified_state = (arm_unified_thread_state_t *)old_tstate;
		if (old_unified_state && old_count >= ARM_UNIFIED_THREAD_STATE_COUNT) {
			old_ts64 = thread_state64(old_unified_state);
		}
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;

		if (old_count == ARM_THREAD_STATE64_COUNT) {
			old_ts64 = (arm_thread_state64_t *)old_tstate;
		}
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	if (only_set_pc) {
		uint64_t new_pc = ts64->pc;
		uint64_t new_flags = ts64->flags;
		/* Only allow pc to be modified in new_state */
		/* NOTE(review): old_ts64 can still be NULL here when old_count
		 * didn't match — verify all TSSF_ONLY_PC callers supply a valid
		 * old state before this memcpy. */
		memcpy(ts64, old_ts64, sizeof(arm_thread_state64_t));
		ts64->pc = new_pc;
		ts64->flags = new_flags;
	}

#if __has_feature(ptrauth_calls)

	void *userland_diversifier = NULL;
	bool kernel_signed_pc;
	bool kernel_signed_lr;
	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);
	bool task_div = !!(tssf_flags & TSSF_TASK_USER_DIV);

	// Note that kernel threads never have disable_user_jop set
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread())) {
		if ((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
		    !thread_is_64bit_addr(thread)) {
			/* Both sides unsigned: accept state as-is. */
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if ((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
		    !thread_is_64bit_addr(thread)
		    ) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
	kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);
	/*
	 * Replace pc/lr with old state if allow only
	 * user ptr flag is passed and ptrs are marked
	 * kernel signed.
	 */
	if ((tssf_flags & TSSF_CHECK_USER_FLAGS) &&
	    (kernel_signed_pc || kernel_signed_lr)) {
		if (old_ts64 && old_count == count) {
			/* Send a CA event if the thread state does not match */
			machine_thread_state_check_pac_state(ts64, old_ts64);

			/* Check if user ptrs needs to be replaced */
			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
			    kernel_signed_pc) {
				ts64->pc = old_ts64->pc;
			}

			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
			    !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
			    kernel_signed_lr) {
				ts64->lr = old_ts64->lr;
				/* Carry over the old lr's IB-signed marker so it auths correctly. */
				if (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
					ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
				} else {
					ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
				}
			}
		}
	}

	/* Validate sigreturn token */
	if (tssf_flags & TSSF_CHECK_SIGRETURN_TOKEN) {
		bool token_matched = machine_thread_state_check_sigreturn_token(ts64, thread);
		if ((tssf_flags & TSSF_ALLOW_ONLY_MATCHING_TOKEN) && !token_matched) {
			return KERN_PROTECTION_FAILURE;
		}
	}

	/* Get the userland diversifier */
	if (random_div && old_ts64 && old_count == count) {
		/* Get the random diversifier from the old thread state */
		userland_diversifier = (void *)(long)(old_ts64->flags &
		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
	} else if (thread_div) {
		userland_diversifier = (void *)(long)(thread_get_sigreturn_diversifier(thread) &
		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
	} else if (task_div) {
		userland_diversifier =
		    (void *)(long)((get_threadtask(thread)->hardened_exception_action.signed_pc_key) &
		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
	}

	if (ts64->pc) {
		/* Kernel-signed values auth with the plain "pc" discriminator;
		 * user-signed values blend in the userland diversifier. */
		uint64_t discriminator = ptrauth_string_discriminator("pc");
		if (!kernel_signed_pc && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator(userland_diversifier,
			    ptrauth_string_discriminator("pc"));
		}
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		uint64_t discriminator = ptrauth_string_discriminator("lr");
		if (!kernel_signed_lr && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator(userland_diversifier,
			    ptrauth_string_discriminator("lr"));
		}
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	(void)old_tstate; (void)old_count; (void)tssf_flags;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
772
773 #if __has_feature(ptrauth_calls)
774 bool
machine_thread_state_is_debug_flavor(int flavor)775 machine_thread_state_is_debug_flavor(int flavor)
776 {
777 if (flavor == ARM_DEBUG_STATE ||
778 flavor == ARM_DEBUG_STATE64 ||
779 flavor == ARM_DEBUG_STATE32) {
780 return true;
781 }
782 return false;
783 }
784 #endif /* __has_feature(ptrauth_calls) */
785
/*
 * Translate signal context data pointer to userspace representation
 *
 * On ptrauth targets, signs the signal ucontext pointer with the
 * process-independent data key ("uctx" discriminator) under the target
 * thread's jop_pid so userspace can authenticate it.
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread())) {
		/* A JOP-disabled/32-bit sender may only target a matching thread. */
		assert((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	/* NULL pointers stay NULL — signing zero would fabricate a value. */
	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
819
/*
 * Translate array of function pointer syscall arguments from userspace representation
 *
 * On ptrauth targets, authenticates each non-NULL entry of fptrs (an
 * array of `count` user function pointers) with the function-pointer key
 * under the target thread's jop_pid, replacing each entry in place.
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread())) {
		/* A JOP-disabled/32-bit sender may only target a matching thread. */
		assert((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
		    !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	/* Auth each entry in place; NULL entries pass through untouched. */
	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0, thread->machine.jop_pid);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
857
/*
 * Routine: machine_thread_get_state
 *
 * Copy the requested flavor of user-mode machine state for 'thread' into
 * the caller-supplied buffer 'tstate'.  On entry *count holds the buffer
 * capacity in natural_t-sized units; on success it is updated to the number
 * of units actually written.
 *
 * Returns KERN_INVALID_ARGUMENT when the flavor is unknown, the buffer is
 * too small, or the flavor's register width does not match the thread's
 * data width (32- vs 64-bit).
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Legacy flavor list: always reports the 32-bit era flavors. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		/* Width-aware list: 64-bit data threads report 64-bit flavors. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* As LIST_NEW, plus ARM_PAGEIN_STATE. */
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		/* Unified flavor: the handler emits 32- or 64-bit state as appropriate. */
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}

		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		/* 32-bit exception state, synthesized from the saved ESR/FAR. */
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = (uint32_t) saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		/* The v1 flavor never reports an exception code; ESR is truncated to 32 bits. */
		state->exception = 0;
		state->far = saved_state->far;
		state->esr = (uint32_t) saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64_V2:{
		/* V2 carries the full-width ESR and drops the exception field. */
		struct arm_exception_state64_v2 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_V2_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64_v2 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_V2_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		/* Legacy 32-bit debug flavor; zeros are returned if none was ever set. */
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		/*
		 * Two sizes are accepted: the full VFP state, or the smaller
		 * VFPv2 subset (32 registers + FPSCR) for older callers.
		 */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		/* max registers plus one extra word for fpscr. */
		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));


		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	case ARM_PAGEIN_STATE: {
		/* Reports the error code from the thread's last page-in, if any. */
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
1171
1172
1173 /*
1174 * Routine: machine_thread_get_kern_state
1175 *
1176 */
1177 kern_return_t
machine_thread_get_kern_state(thread_t thread,thread_flavor_t flavor,thread_state_t tstate,mach_msg_type_number_t * count)1178 machine_thread_get_kern_state(thread_t thread,
1179 thread_flavor_t flavor,
1180 thread_state_t tstate,
1181 mach_msg_type_number_t * count)
1182 {
1183 /*
1184 * This works only for an interrupted kernel thread
1185 */
1186 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1187 return KERN_FAILURE;
1188 }
1189
1190 switch (flavor) {
1191 case ARM_THREAD_STATE:
1192 {
1193 kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1194 if (rn) {
1195 return rn;
1196 }
1197 break;
1198 }
1199 case ARM_THREAD_STATE32:
1200 {
1201 kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1202 if (rn) {
1203 return rn;
1204 }
1205 break;
1206 }
1207 #if __arm64__
1208 case ARM_THREAD_STATE64:
1209 {
1210 kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1211 if (rn) {
1212 return rn;
1213 }
1214 break;
1215 }
1216 #endif
1217 default:
1218 return KERN_INVALID_ARGUMENT;
1219 }
1220 return KERN_SUCCESS;
1221 }
1222
1223 void
machine_thread_switch_addrmode(thread_t thread)1224 machine_thread_switch_addrmode(thread_t thread)
1225 {
1226 if (task_has_64Bit_data(get_threadtask(thread))) {
1227 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
1228 thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
1229 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1230 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1231
1232 /*
1233 * Reinitialize the NEON state.
1234 */
1235 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1236 thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
1237 } else {
1238 thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
1239 thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
1240 thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1241 thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1242
1243 /*
1244 * Reinitialize the NEON state.
1245 */
1246 bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1247 thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
1248 }
1249 }
1250
1251 extern long long arm_debug_get(void);
1252
/*
 * Routine: machine_thread_set_state
 *
 * Install the caller-supplied machine state of the given flavor into
 * 'thread'.  'count' must exactly match the flavor's defined size, and the
 * flavor's width must match the thread's data width (32- vs 64-bit).
 *
 * For the debug flavors, user-supplied breakpoint/watchpoint control bits
 * are validated (no linked/context-ID matching allowed) and sanitized to
 * user-mode-only, unlinked, address-match settings before being installed.
 *
 * Returns KERN_INVALID_ARGUMENT on bad flavor/count/width,
 * KERN_PROTECTION_FAILURE on disallowed debug control bits, and
 * KERN_FAILURE if debug-state allocation fails.
 */
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}


		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		/* Exception state is read-only; only the count/width are validated. */
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64_V2:{
		if (count != ARM_EXCEPTION_STATE64_V2_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		/* Legacy 32-bit debug flavor: no single-step (mdscr) support. */
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		/* With no enabled break/watchpoints, drop any allocated debug state. */
		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
		}

		/* Apply immediately if the target is running on this CPU. */
		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
	/* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		/* A single-step request (MDSCR.SS) also counts as "debug enabled". */
		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			/* Only the single-step bit of mdscr_el1 is honored. */
			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/*
			 * do not allow context IDs to be set
			 * NOTE(review): unlike the 32-bit flavors, wcr is not checked
			 * against ARM_DBGBCR_TYPE_MASK here — AArch64 DBGWCR has no
			 * breakpoint-type field, so only its linked bit is validated.
			 */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		/* Accept either the full VFP state or the smaller VFPv2 subset. */
		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		/* max registers plus one extra word for fpscr. */
		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));


		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
1628
1629 mach_vm_address_t
machine_thread_pc(thread_t thread)1630 machine_thread_pc(thread_t thread)
1631 {
1632 struct arm_saved_state *ss = get_user_regs(thread);
1633 return (mach_vm_address_t)get_saved_state_pc(ss);
1634 }
1635
1636 void
machine_thread_reset_pc(thread_t thread,mach_vm_address_t pc)1637 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1638 {
1639 set_user_saved_state_pc(get_user_regs(thread), (register_t)pc);
1640 }
1641
/*
 * Routine: machine_thread_state_initialize
 *
 * Reset a newly-created thread's saved user register and NEON state to
 * zero/default values, clear any inherited debug state, and (on arm64e)
 * PAC-sign the zeroed user thread state so it can later be verified.
 */
void
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		/* FPCR default differs between 32- and 64-bit NEON state layouts. */
		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
		context->ss.ss_64.cpsr = PSR64_USER64_DEFAULT;
	}

	/* No debug state is inherited; allocated lazily on first use. */
	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		/*
		 * ml_sign_thread_state is called on SP1 with interrupts masked so
		 * the signing keys cannot be observed/changed mid-operation; x6
		 * preserves lr across the call.
		 */
		uint64_t intr = ml_pac_safe_interrupts_disable();
		asm volatile (
                        "mov x0, %[iss]" "\n"
                        "mov x1, #0" "\n"
                        "mov w2, %w[usr]" "\n"
                        "mov x3, #0" "\n"
                        "mov x4, #0" "\n"
                        "mov x5, #0" "\n"
                        "msr SPSel, #1" "\n"
                        VERIFY_USER_THREAD_STATE_INSTR "\n"
                        "mov x6, lr" "\n"
                        "bl _ml_sign_thread_state" "\n"
                        "msr SPSel, #0" "\n"
                        "mov lr, x6" "\n"
                        :
                        : [iss] "r"(thread->machine.upcb), [usr] "r"(thread->machine.upcb->ss_64.cpsr),
                        VERIFY_USER_THREAD_STATE_INPUTS
                        : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
		);
		ml_pac_safe_interrupts_restore(intr);
	}
#endif /* defined(HAS_APPLE_PAC) */
}
1697
1698 /*
1699 * Routine: machine_thread_dup
1700 *
1701 */
1702 kern_return_t
machine_thread_dup(thread_t self,thread_t target,__unused boolean_t is_corpse)1703 machine_thread_dup(thread_t self,
1704 thread_t target,
1705 __unused boolean_t is_corpse)
1706 {
1707 struct arm_saved_state *self_saved_state;
1708 struct arm_saved_state *target_saved_state;
1709
1710 target->machine.cthread_self = self->machine.cthread_self;
1711
1712 self_saved_state = self->machine.upcb;
1713 target_saved_state = target->machine.upcb;
1714 bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
1715 #if defined(HAS_APPLE_PAC)
1716 if (!is_corpse && is_saved_state64(self_saved_state)) {
1717 check_and_sign_copied_user_thread_state(target_saved_state, self_saved_state);
1718 }
1719 #endif /* defined(HAS_APPLE_PAC) */
1720
1721 arm_neon_saved_state_t *self_neon_state = self->machine.uNeon;
1722 arm_neon_saved_state_t *target_neon_state = target->machine.uNeon;
1723 bcopy(self_neon_state, target_neon_state, sizeof(*target_neon_state));
1724
1725 #if HAVE_MACHINE_THREAD_MATRIX_STATE
1726 if (self->machine.umatrix_hdr) {
1727 machine_thread_matrix_state_dup(target);
1728 }
1729 #endif
1730
1731 return KERN_SUCCESS;
1732 }
1733
/*
 * Routine: get_user_regs
 *
 * Return a pointer to the thread's saved user-mode general-purpose
 * register state (the user PCB).
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
1743
/*
 * Return a pointer to the thread's saved user-mode NEON register state.
 */
arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}
1749
/*
 * Routine: find_user_regs
 *
 * Return the thread's saved user-mode register state; functionally
 * identical to get_user_regs().
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}
1759
1760 /*
1761 * Routine: find_kern_regs
1762 *
1763 */
1764 struct arm_saved_state *
find_kern_regs(thread_t thread)1765 find_kern_regs(thread_t thread)
1766 {
1767 /*
1768 * This works only for an interrupted kernel thread
1769 */
1770 if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1771 return (struct arm_saved_state *) NULL;
1772 } else {
1773 return getCpuDatap()->cpu_int_state;
1774 }
1775 }
1776
1777 arm_debug_state32_t *
find_debug_state32(thread_t thread)1778 find_debug_state32(thread_t thread)
1779 {
1780 if (thread && thread->machine.DebugData) {
1781 return &(thread->machine.DebugData->uds.ds32);
1782 } else {
1783 return NULL;
1784 }
1785 }
1786
1787 arm_debug_state64_t *
find_debug_state64(thread_t thread)1788 find_debug_state64(thread_t thread)
1789 {
1790 if (thread && thread->machine.DebugData) {
1791 return &(thread->machine.DebugData->uds.ds64);
1792 } else {
1793 return NULL;
1794 }
1795 }
1796
1797 os_refgrp_decl(static, dbg_refgrp, "arm_debug_state", NULL);
1798
1799 /**
1800 * Finds the debug state for the given 64 bit thread, allocating one if it
1801 * does not exist.
1802 *
1803 * @param thread 64 bit thread to find or allocate debug state for
1804 *
1805 * @returns A pointer to the given thread's 64 bit debug state or a null
1806 * pointer if the given thread is null or the allocation of a new
1807 * debug state fails.
1808 */
1809 arm_debug_state64_t *
find_or_allocate_debug_state64(thread_t thread)1810 find_or_allocate_debug_state64(thread_t thread)
1811 {
1812 arm_debug_state64_t *thread_state = find_debug_state64(thread);
1813 if (thread != NULL && thread_state == NULL) {
1814 thread->machine.DebugData = zalloc_flags(ads_zone,
1815 Z_WAITOK | Z_NOFAIL);
1816 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1817 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1818 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1819 os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1820 thread_state = find_debug_state64(thread);
1821 }
1822 return thread_state;
1823 }
1824
1825 /**
1826 * Finds the debug state for the given 32 bit thread, allocating one if it
1827 * does not exist.
1828 *
1829 * @param thread 32 bit thread to find or allocate debug state for
1830 *
1831 * @returns A pointer to the given thread's 32 bit debug state or a null
1832 * pointer if the given thread is null or the allocation of a new
1833 * debug state fails.
1834 */
1835 arm_debug_state32_t *
find_or_allocate_debug_state32(thread_t thread)1836 find_or_allocate_debug_state32(thread_t thread)
1837 {
1838 arm_debug_state32_t *thread_state = find_debug_state32(thread);
1839 if (thread != NULL && thread_state == NULL) {
1840 thread->machine.DebugData = zalloc_flags(ads_zone,
1841 Z_WAITOK | Z_NOFAIL);
1842 bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1843 thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1844 thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1845 os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1846 thread_state = find_debug_state32(thread);
1847 }
1848 return thread_state;
1849 }
1850
1851 /**
1852 * Frees a thread's debug state if allocated. Otherwise does nothing.
1853 *
1854 * @param thread thread to free the debug state of
1855 */
1856 static inline void
free_debug_state(thread_t thread)1857 free_debug_state(thread_t thread)
1858 {
1859 if (thread != NULL && thread->machine.DebugData != NULL) {
1860 arm_debug_state_t *pTmp = thread->machine.DebugData;
1861 thread->machine.DebugData = NULL;
1862
1863 if (os_ref_release(&pTmp->ref) == 0) {
1864 zfree(ads_zone, pTmp);
1865 }
1866 }
1867 }
1868
1869 /*
1870 * Routine: thread_userstack
1871 *
1872 */
1873 kern_return_t
thread_userstack(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * user_stack,int * customstack,boolean_t is_64bit_data)1874 thread_userstack(__unused thread_t thread,
1875 int flavor,
1876 thread_state_t tstate,
1877 unsigned int count,
1878 mach_vm_offset_t * user_stack,
1879 int * customstack,
1880 boolean_t is_64bit_data
1881 )
1882 {
1883 register_t sp;
1884
1885 switch (flavor) {
1886 case ARM_THREAD_STATE:
1887 if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1888 #if __arm64__
1889 if (is_64bit_data) {
1890 sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1891 } else
1892 #endif
1893 {
1894 sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1895 }
1896
1897 break;
1898 }
1899
1900 /* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1901 OS_FALLTHROUGH;
1902 case ARM_THREAD_STATE32:
1903 if (count != ARM_THREAD_STATE32_COUNT) {
1904 return KERN_INVALID_ARGUMENT;
1905 }
1906 if (is_64bit_data) {
1907 return KERN_INVALID_ARGUMENT;
1908 }
1909
1910 sp = ((arm_thread_state32_t *)tstate)->sp;
1911 break;
1912 #if __arm64__
1913 case ARM_THREAD_STATE64:
1914 if (count != ARM_THREAD_STATE64_COUNT) {
1915 return KERN_INVALID_ARGUMENT;
1916 }
1917 if (!is_64bit_data) {
1918 return KERN_INVALID_ARGUMENT;
1919 }
1920
1921 sp = ((arm_thread_state32_t *)tstate)->sp;
1922 break;
1923 #endif
1924 default:
1925 return KERN_INVALID_ARGUMENT;
1926 }
1927
1928 if (sp) {
1929 *user_stack = CAST_USER_ADDR_T(sp);
1930 if (customstack) {
1931 *customstack = 1;
1932 }
1933 } else {
1934 *user_stack = CAST_USER_ADDR_T(USRSTACK64);
1935 if (customstack) {
1936 *customstack = 0;
1937 }
1938 }
1939
1940 return KERN_SUCCESS;
1941 }
1942
1943 /*
1944 * thread_userstackdefault:
1945 *
1946 * Return the default stack location for the
1947 * thread, if otherwise unknown.
1948 */
1949 kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,boolean_t is64bit)1950 thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1951 boolean_t is64bit)
1952 {
1953 if (is64bit) {
1954 *default_user_stack = USRSTACK64;
1955 } else {
1956 *default_user_stack = USRSTACK;
1957 }
1958
1959 return KERN_SUCCESS;
1960 }
1961
1962 /*
1963 * Routine: thread_setuserstack
1964 *
1965 */
1966 void
thread_setuserstack(thread_t thread,mach_vm_address_t user_stack)1967 thread_setuserstack(thread_t thread,
1968 mach_vm_address_t user_stack)
1969 {
1970 struct arm_saved_state *sv;
1971
1972 sv = get_user_regs(thread);
1973
1974 set_saved_state_sp(sv, user_stack);
1975
1976 return;
1977 }
1978
1979 /*
1980 * Routine: thread_adjuserstack
1981 *
1982 */
1983 user_addr_t
thread_adjuserstack(thread_t thread,int adjust)1984 thread_adjuserstack(thread_t thread,
1985 int adjust)
1986 {
1987 struct arm_saved_state *sv;
1988 uint64_t sp;
1989
1990 sv = get_user_regs(thread);
1991
1992 sp = get_saved_state_sp(sv);
1993 sp += adjust;
1994 set_saved_state_sp(sv, sp);
1995
1996 return sp;
1997 }
1998
1999
2000 /*
2001 * Routine: thread_setentrypoint
2002 *
2003 */
2004 void
thread_setentrypoint(thread_t thread,mach_vm_offset_t entry)2005 thread_setentrypoint(thread_t thread,
2006 mach_vm_offset_t entry)
2007 {
2008 struct arm_saved_state *sv;
2009
2010 #if HAS_APPLE_PAC
2011 uint64_t intr = ml_pac_safe_interrupts_disable();
2012 #endif
2013
2014 sv = get_user_regs(thread);
2015
2016 set_user_saved_state_pc(sv, entry);
2017
2018 #if HAS_APPLE_PAC
2019 ml_pac_safe_interrupts_restore(intr);
2020 #endif
2021
2022 return;
2023 }
2024
2025 /*
2026 * Routine: thread_entrypoint
2027 *
2028 */
2029 kern_return_t
thread_entrypoint(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * entry_point)2030 thread_entrypoint(__unused thread_t thread,
2031 int flavor,
2032 thread_state_t tstate,
2033 unsigned int count,
2034 mach_vm_offset_t * entry_point
2035 )
2036 {
2037 switch (flavor) {
2038 case ARM_THREAD_STATE:
2039 {
2040 struct arm_thread_state *state;
2041
2042 if (count != ARM_THREAD_STATE_COUNT) {
2043 return KERN_INVALID_ARGUMENT;
2044 }
2045
2046 state = (struct arm_thread_state *) tstate;
2047
2048 /*
2049 * If a valid entry point is specified, use it.
2050 */
2051 if (state->pc) {
2052 *entry_point = CAST_USER_ADDR_T(state->pc);
2053 } else {
2054 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
2055 }
2056 }
2057 break;
2058
2059 case ARM_THREAD_STATE64:
2060 {
2061 struct arm_thread_state64 *state;
2062
2063 if (count != ARM_THREAD_STATE64_COUNT) {
2064 return KERN_INVALID_ARGUMENT;
2065 }
2066
2067 state = (struct arm_thread_state64*) tstate;
2068
2069 /*
2070 * If a valid entry point is specified, use it.
2071 */
2072 if (state->pc) {
2073 *entry_point = CAST_USER_ADDR_T(state->pc);
2074 } else {
2075 *entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
2076 }
2077
2078 break;
2079 }
2080 default:
2081 return KERN_INVALID_ARGUMENT;
2082 }
2083
2084 return KERN_SUCCESS;
2085 }
2086
2087
2088 /*
2089 * Routine: thread_set_child
2090 *
2091 */
2092 void
thread_set_child(thread_t child,int pid)2093 thread_set_child(thread_t child,
2094 int pid)
2095 {
2096 struct arm_saved_state *child_state;
2097
2098 child_state = get_user_regs(child);
2099
2100 set_user_saved_state_reg(child_state, 0, pid);
2101 set_user_saved_state_reg(child_state, 1, 1ULL);
2102 }
2103
2104
/* Per-thread register snapshot captured by act_thread_csave(). */
struct arm_act_context {
	struct arm_unified_thread_state ss;     /* general-purpose state */
#if __ARM_VFP__
	struct arm_neon_saved_state ns;         /* FP/SIMD state */
#endif
};
2111
2112 /*
2113 * Routine: act_thread_csave
2114 *
2115 */
2116 void *
act_thread_csave(void)2117 act_thread_csave(void)
2118 {
2119 struct arm_act_context *ic;
2120 kern_return_t kret;
2121 unsigned int val;
2122 thread_t thread = current_thread();
2123
2124 ic = kalloc_type(struct arm_act_context, Z_WAITOK);
2125 if (ic == (struct arm_act_context *) NULL) {
2126 return (void *) 0;
2127 }
2128
2129 val = ARM_UNIFIED_THREAD_STATE_COUNT;
2130 kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
2131 if (kret != KERN_SUCCESS) {
2132 kfree_type(struct arm_act_context, ic);
2133 return (void *) 0;
2134 }
2135
2136 #if __ARM_VFP__
2137 if (thread_is_64bit_data(thread)) {
2138 val = ARM_NEON_STATE64_COUNT;
2139 kret = machine_thread_get_state(thread,
2140 ARM_NEON_STATE64,
2141 (thread_state_t)&ic->ns,
2142 &val);
2143 } else {
2144 val = ARM_NEON_STATE_COUNT;
2145 kret = machine_thread_get_state(thread,
2146 ARM_NEON_STATE,
2147 (thread_state_t)&ic->ns,
2148 &val);
2149 }
2150 if (kret != KERN_SUCCESS) {
2151 kfree_type(struct arm_act_context, ic);
2152 return (void *) 0;
2153 }
2154 #endif
2155 return ic;
2156 }
2157
2158 /*
2159 * Routine: act_thread_catt
2160 *
2161 */
2162 void
act_thread_catt(void * ctx)2163 act_thread_catt(void * ctx)
2164 {
2165 struct arm_act_context *ic;
2166 kern_return_t kret;
2167 thread_t thread = current_thread();
2168
2169 ic = (struct arm_act_context *) ctx;
2170 if (ic == (struct arm_act_context *) NULL) {
2171 return;
2172 }
2173
2174 kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
2175 if (kret != KERN_SUCCESS) {
2176 goto out;
2177 }
2178
2179 #if __ARM_VFP__
2180 if (thread_is_64bit_data(thread)) {
2181 kret = machine_thread_set_state(thread,
2182 ARM_NEON_STATE64,
2183 (thread_state_t)&ic->ns,
2184 ARM_NEON_STATE64_COUNT);
2185 } else {
2186 kret = machine_thread_set_state(thread,
2187 ARM_NEON_STATE,
2188 (thread_state_t)&ic->ns,
2189 ARM_NEON_STATE_COUNT);
2190 }
2191 if (kret != KERN_SUCCESS) {
2192 goto out;
2193 }
2194 #endif
2195 out:
2196 kfree_type(struct arm_act_context, ic);
2197 }
2198
/*
 * Routine: act_thread_cfree
 *
 * Release a context allocated by act_thread_csave() without
 * applying it.
 */
void
act_thread_cfree(void *ctx)
{
	kfree_type(struct arm_act_context, ctx);
}
2208
/*
 * Routine: thread_set_wq_state32
 *
 * Install a caller-supplied 32-bit register state into a workqueue
 * thread.  The CPSR is forced to PSR64_USER32_DEFAULT rather than
 * taken from the caller's state.  When the target is not the current
 * thread, the update is performed under the thread lock at splsched.
 */
kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	/* Lock out the scheduler only when mutating another thread's state. */
	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	/* Override caller-provided CPSR with the sanctioned 32-bit default. */
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
2246
/*
 * Routine: thread_set_wq_state64
 *
 * Install a caller-supplied 64-bit register state into a workqueue
 * thread.  The CPSR is forced to PSR64_USER64_DEFAULT rather than
 * taken from the caller's state.  When the target is not the current
 * thread, the update is performed under the thread lock at splsched.
 */
kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	/* Lock out the scheduler only when mutating another thread's state. */
	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	/* Override caller-provided CPSR with the sanctioned 64-bit default. */
	set_user_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
2283