/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <debug.h>
#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/thread_status.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <arm/vmparam.h>
#include <arm/cpu_data_internal.h>
#include <arm/misc_protos.h>
#include <arm64/proc_reg.h>
#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif


struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
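
/*
 * Legacy VFPv2 layout: 32 single-precision registers plus FPSCR, i.e. 33
 * 32-bit words, versus the 65-word ARM_VFP_STATE layout. The VFP get/set
 * handlers below accept either count and copy only the shorter prefix for
 * VFPv2 clients.
 */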

/*
 * Forward declarations
 */
void thread_set_child(thread_t child, int pid);
void thread_set_parent(thread_t parent, int pid);
static void free_debug_state(thread_t thread);

/*
 * Maps state flavor to number of words in the state:
 */
/* __private_extern__ */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};

extern zone_t ads_zone;

#if __arm64__
/*
 * Copy values from saved_state to ts64.
 */
void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
    arm_thread_state64_t * ts64)
{
	uint32_t i;

	assert(is_saved_state64(saved_state));

	ts64->fp = get_saved_state_fp(saved_state);
	ts64->lr = get_saved_state_lr(saved_state);
	ts64->sp = get_saved_state_sp(saved_state);
	ts64->pc = get_saved_state_pc(saved_state);
	ts64->cpsr = get_saved_state_cpsr(saved_state);
	for (i = 0; i < 29; i++) {
		ts64->x[i] = get_saved_state_reg(saved_state, i);
	}
}

/*
 * Copy values from ts64 to saved_state.
 *
 * For safety, CPSR is sanitized as follows:
 *
 * - ts64->cpsr.{N,Z,C,V} are copied as-is into saved_state->cpsr
 * - ts64->cpsr.M is ignored, and saved_state->cpsr.M is reset to EL0
 * - All other saved_state->cpsr bits are preserved as-is
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t * saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	uint64_t intr = ml_pac_safe_interrupts_disable();
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

	const uint32_t CPSR_COPY_MASK = PSR64_USER_MASK;
	const uint32_t CPSR_ZERO_MASK = PSR64_MODE_MASK;
	const uint32_t CPSR_PRESERVE_MASK = ~(CPSR_COPY_MASK | CPSR_ZERO_MASK);
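	/*
	 * On ptrauth targets, CPSR is covered by the thread-state signature,
	 * so the merge below happens inside MANIPULATE_SIGNED_THREAD_STATE,
	 * which is expected to verify the existing signature and re-sign the
	 * saved state as part of one PAC-safe critical section.
	 */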
#if __has_feature(ptrauth_calls)
	/* BEGIN IGNORE CODESTYLE */
	MANIPULATE_SIGNED_THREAD_STATE(saved_state,
	    "and w2, w2, %w[preserve_mask]" "\n"
	    "mov w6, %w[cpsr]" "\n"
	    "and w6, w6, %w[copy_mask]" "\n"
	    "orr w2, w2, w6" "\n"
	    "str w2, [x0, %[SS64_CPSR]]" "\n",
	    [cpsr] "r"(ts64->cpsr),
	    [preserve_mask] "i"(CPSR_PRESERVE_MASK),
	    [copy_mask] "i"(CPSR_COPY_MASK)
	);
	/* END IGNORE CODESTYLE */
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#else
	uint32_t new_cpsr = get_saved_state_cpsr(saved_state);
	new_cpsr &= CPSR_PRESERVE_MASK;
	new_cpsr |= (ts64->cpsr & CPSR_COPY_MASK);
	set_saved_state_cpsr(saved_state, new_cpsr);
#endif /* __has_feature(ptrauth_calls) */
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_pac_safe_interrupts_restore(intr);
#endif /* __has_feature(ptrauth_calls) */
}

#endif /* __arm64__ */

static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state32(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
	*count = ARM_THREAD_STATE32_COUNT;
	return KERN_SUCCESS;
}

static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	if (*count < ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}
	if (!is_saved_state64(saved_state)) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
	*count = ARM_THREAD_STATE64_COUNT;
	return KERN_SUCCESS;
}


static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,
    mach_msg_type_number_t * count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}


static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE32_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
	return KERN_SUCCESS;
}

static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	if (count != ARM_THREAD_STATE64_COUNT) {
		return KERN_INVALID_ARGUMENT;
	}

	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
	return KERN_SUCCESS;
}


static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}


/*
 * Translate thread state arguments to userspace representation
 */

kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
	    ) {
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

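	/*
	 * arm_user_jop_disabled() reflects a system-wide opt-out of user
	 * pointer signing (e.g. via a boot argument); when it is set, thread
	 * state is exchanged unsigned.
	 */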
	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

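	/*
	 * Sign pc/lr with the IA key and sp/fp with the DA key, each using a
	 * string discriminator matching the field name and diversified by the
	 * target's jop_pid, so the result authenticates in the target
	 * process's key domain (this mirrors what the userspace thread-state
	 * accessors expect).
	 */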
	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate thread state arguments from userspace representation
 */

kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
		    ) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (ts64->pc) {
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("pc"),
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, ptrauth_string_discriminator("lr"),
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate signal context data pointer to userspace representation
 */

kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Translate array of function pointer syscall arguments from userspace representation
 */

kern_return_t
machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

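	/*
	 * Function pointers use the IA function-pointer key with a zero
	 * discriminator, matching the arm64e ABI convention for C function
	 * pointers: a pointer the process could legitimately call is also one
	 * it may legitimately pass here.
	 */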
	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0, thread->machine.jop_pid);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}

/*
 * Routine: machine_thread_get_state
 *
 */
kern_return_t
machine_thread_get_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		const arm_saved_state_t *current_state = thread->machine.upcb;

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count,
		    current_state);
		if (rn) {
			return rn;
		}

		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count = ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}
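		/*
		 * max counts the 32-bit data words to copy: 32 for the legacy
		 * VFPv2 layout, 64 for the full register file; the +1 below
		 * accounts for the trailing fpscr word.
		 */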

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));


		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}


/*
 * Routine: machine_thread_get_kern_state
 *
 */
kern_return_t
machine_thread_get_kern_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t * count)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
		if (rn) {
			return rn;
		}
		break;
	}
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}

void
machine_thread_switch_addrmode(thread_t thread)
{
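	/*
	 * Presumably invoked when the task's data size changes (e.g. across
	 * exec): relabel the saved-state containers for the new register
	 * width and clear the NEON state so no register contents leak from
	 * the previous address mode.
	 */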
	if (task_has_64Bit_data(get_threadtask(thread))) {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
	} else {
		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;

		/*
		 * Reinitialize the NEON state.
		 */
		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
	}
}

extern long long arm_debug_get(void);

/*
 * Routine: machine_thread_set_state
 *
 */
kern_return_t
machine_thread_set_state(thread_t thread,
    thread_flavor_t flavor,
    thread_state_t tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}


		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL; // Legacy customers issuing ARM_DEBUG_STATE don't drive single stepping.
		}

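		/*
		 * Apply the new debug state to the hardware registers right
		 * away when the target is the calling thread; for any other
		 * thread it takes effect when that thread is next
		 * context-switched in.
		 */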
		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

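		/*
		 * MDSCR_EL1.SS requests hardware single-step; it counts as an
		 * active debug configuration even with no breakpoints or
		 * watchpoints enabled.
		 */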
		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0 /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0 /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));


		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}

mach_vm_address_t
machine_thread_pc(thread_t thread)
{
	struct arm_saved_state *ss = get_user_regs(thread);
	return (mach_vm_address_t)get_saved_state_pc(ss);
}

void
machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
{
	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
}

/*
 * Routine: machine_thread_state_initialize
 *
 */
void
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
		context->ss.ss_64.cpsr = PSR64_USER64_DEFAULT;
	}

	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		uint64_t intr = ml_pac_safe_interrupts_disable();
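		/*
		 * ml_sign_thread_state(ss, pc, cpsr, lr, x16, x17): pc, lr,
		 * and the x16/x17 values are all zero for a fresh thread, so
		 * only the saved-state pointer and the just-initialized CPSR
		 * feed the signature (argument mapping inferred from the
		 * register setup below).
		 */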
		asm volatile (
		    "mov x0, %[iss]" "\n"
		    "mov x1, #0" "\n"
		    "mov w2, %w[usr]" "\n"
		    "mov x3, #0" "\n"
		    "mov x4, #0" "\n"
		    "mov x5, #0" "\n"
		    "mov x6, lr" "\n"
		    "msr SPSel, #1" "\n"
		    "bl _ml_sign_thread_state" "\n"
		    "msr SPSel, #0" "\n"
		    "mov lr, x6" "\n"
		    :
		    : [iss] "r"(thread->machine.upcb), [usr] "r"(thread->machine.upcb->ss_64.cpsr)
		    : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
		);
		ml_pac_safe_interrupts_restore(intr);
	}
#endif /* defined(HAS_APPLE_PAC) */
}

/*
 * Routine: machine_thread_dup
 *
 */
kern_return_t
machine_thread_dup(thread_t self,
    thread_t target,
    __unused boolean_t is_corpse)
{
	struct arm_saved_state *self_saved_state;
	struct arm_saved_state *target_saved_state;

	target->machine.cthread_self = self->machine.cthread_self;

	self_saved_state = self->machine.upcb;
	target_saved_state = target->machine.upcb;
	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
#if defined(HAS_APPLE_PAC)
	if (!is_corpse && is_saved_state64(self_saved_state)) {
		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
	}
#endif /* defined(HAS_APPLE_PAC) */

	arm_neon_saved_state_t *self_neon_state = self->machine.uNeon;
	arm_neon_saved_state_t *target_neon_state = target->machine.uNeon;
	bcopy(self_neon_state, target_neon_state, sizeof(*target_neon_state));

	return KERN_SUCCESS;
}

/*
 * Routine: get_user_regs
 *
 */
struct arm_saved_state *
get_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)
{
	return thread->machine.uNeon;
}

/*
 * Routine: find_user_regs
 *
 */
struct arm_saved_state *
find_user_regs(thread_t thread)
{
	return thread->machine.upcb;
}

/*
 * Routine: find_kern_regs
 *
 */
struct arm_saved_state *
find_kern_regs(thread_t thread)
{
	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return (struct arm_saved_state *) NULL;
	} else {
		return getCpuDatap()->cpu_int_state;
	}
}

arm_debug_state32_t *
find_debug_state32(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds32);
	} else {
		return NULL;
	}
}

arm_debug_state64_t *
find_debug_state64(thread_t thread)
{
	if (thread && thread->machine.DebugData) {
		return &(thread->machine.DebugData->uds.ds64);
	} else {
		return NULL;
	}
}

/**
 * Finds the debug state for the given 64 bit thread, allocating one if it
 * does not exist.
 *
 * @param thread 64 bit thread to find or allocate debug state for
 *
 * @returns A pointer to the given thread's 64 bit debug state or a null
 * pointer if the given thread is null or the allocation of a new
 * debug state fails.
 */
arm_debug_state64_t *
find_or_allocate_debug_state64(thread_t thread)
{
	arm_debug_state64_t *thread_state = find_debug_state64(thread);
	if (thread != NULL && thread_state == NULL) {
		thread->machine.DebugData = zalloc_flags(ads_zone,
		    Z_WAITOK | Z_NOFAIL);
		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
		thread_state = find_debug_state64(thread);
	}
	return thread_state;
}

/**
 * Finds the debug state for the given 32 bit thread, allocating one if it
 * does not exist.
 *
 * @param thread 32 bit thread to find or allocate debug state for
 *
 * @returns A pointer to the given thread's 32 bit debug state or a null
 * pointer if the given thread is null or the allocation of a new
 * debug state fails.
 */
arm_debug_state32_t *
find_or_allocate_debug_state32(thread_t thread)
{
	arm_debug_state32_t *thread_state = find_debug_state32(thread);
	if (thread != NULL && thread_state == NULL) {
		thread->machine.DebugData = zalloc_flags(ads_zone,
		    Z_WAITOK | Z_NOFAIL);
		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
		thread_state = find_debug_state32(thread);
	}
	return thread_state;
}

/**
 * Frees a thread's debug state if allocated. Otherwise does nothing.
 *
 * @param thread thread to free the debug state of
 */
static inline void
free_debug_state(thread_t thread)
{
	if (thread != NULL && thread->machine.DebugData != NULL) {
		void *pTmp = thread->machine.DebugData;
		thread->machine.DebugData = NULL;
		zfree(ads_zone, pTmp);
	}
}

/*
 * Routine: thread_userstack
 *
 */
kern_return_t
thread_userstack(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * user_stack,
    int * customstack,
    boolean_t is_64bit_data
    )
{
	register_t sp;

	switch (flavor) {
	case ARM_THREAD_STATE:
		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
#if __arm64__
			if (is_64bit_data) {
				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
			} else
#endif
			{
				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
			}

			break;
		}

		/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
		OS_FALLTHROUGH;
	case ARM_THREAD_STATE32:
		if (count != ARM_THREAD_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state32_t *)tstate)->sp;
		break;
#if __arm64__
	case ARM_THREAD_STATE64:
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!is_64bit_data) {
			return KERN_INVALID_ARGUMENT;
		}

		sp = ((arm_thread_state64_t *)tstate)->sp;
		break;
#endif
	default:
		return KERN_INVALID_ARGUMENT;
	}

	if (sp) {
		*user_stack = CAST_USER_ADDR_T(sp);
		if (customstack) {
			*customstack = 1;
		}
	} else {
		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
		if (customstack) {
			*customstack = 0;
		}
	}

	return KERN_SUCCESS;
}

/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.
 */
kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,
    boolean_t is64bit)
{
	if (is64bit) {
		*default_user_stack = USRSTACK64;
	} else {
		*default_user_stack = USRSTACK;
	}

	return KERN_SUCCESS;
}

/*
 * Routine: thread_setuserstack
 *
 */
void
thread_setuserstack(thread_t thread,
    mach_vm_address_t user_stack)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_sp(sv, user_stack);

	return;
}

/*
 * Routine: thread_adjuserstack
 *
 */
user_addr_t
thread_adjuserstack(thread_t thread,
    int adjust)
{
	struct arm_saved_state *sv;
	uint64_t sp;

	sv = get_user_regs(thread);

	sp = get_saved_state_sp(sv);
	sp += adjust;
	set_saved_state_sp(sv, sp);

	return sp;
}


/*
 * Routine: thread_setentrypoint
 *
 */
void
thread_setentrypoint(thread_t thread,
    mach_vm_offset_t entry)
{
	struct arm_saved_state *sv;

	sv = get_user_regs(thread);

	set_saved_state_pc(sv, entry);

	return;
}

/*
 * Routine: thread_entrypoint
 *
 */
kern_return_t
thread_entrypoint(__unused thread_t thread,
    int flavor,
    thread_state_t tstate,
    unsigned int count,
    mach_vm_offset_t * entry_point
    )
{
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		struct arm_thread_state *state;

		if (count != ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_thread_state *) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}
	}
	break;

	case ARM_THREAD_STATE64:
	{
		struct arm_thread_state64 *state;

		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_thread_state64*) tstate;

		/*
		 * If a valid entry point is specified, use it.
		 */
		if (state->pc) {
			*entry_point = CAST_USER_ADDR_T(state->pc);
		} else {
			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
		}

		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}

	return KERN_SUCCESS;
}


/*
 * Routine: thread_set_child
 *
 */
void
thread_set_child(thread_t child,
    int pid)
{
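	/*
	 * Fork-style return convention: each side of the fork resumes with
	 * the peer's pid in register 0, while register 1 distinguishes the
	 * two sides (1 in the child here, 0 in the parent in
	 * thread_set_parent below).
	 */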
	struct arm_saved_state *child_state;

	child_state = get_user_regs(child);

	set_saved_state_reg(child_state, 0, pid);
	set_saved_state_reg(child_state, 1, 1ULL);
}


/*
 * Routine: thread_set_parent
 *
 */
void
thread_set_parent(thread_t parent,
    int pid)
{
	struct arm_saved_state *parent_state;

	parent_state = get_user_regs(parent);

	set_saved_state_reg(parent_state, 0, pid);
	set_saved_state_reg(parent_state, 1, 0);
}


struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};

/*
 * Routine: act_thread_csave
 *
 */
void *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	unsigned int val;
	thread_t thread = current_thread();

	ic = kalloc_type(struct arm_act_context, Z_WAITOK);
	if (ic == (struct arm_act_context *) NULL) {
		return (void *) 0;
	}

	val = ARM_UNIFIED_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
	if (kret != KERN_SUCCESS) {
		kfree_type(struct arm_act_context, ic);
		return (void *) 0;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		val = ARM_NEON_STATE64_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    &val);
	} else {
		val = ARM_NEON_STATE_COUNT;
		kret = machine_thread_get_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    &val);
	}
	if (kret != KERN_SUCCESS) {
		kfree_type(struct arm_act_context, ic);
		return (void *) 0;
	}
#endif
	return ic;
}

/*
 * Routine: act_thread_catt
 *
 */
void
act_thread_catt(void * ctx)
{
	struct arm_act_context *ic;
	kern_return_t kret;
	thread_t thread = current_thread();

	ic = (struct arm_act_context *) ctx;
	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	if (thread_is_64bit_data(thread)) {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE64,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE64_COUNT);
	} else {
		kret = machine_thread_set_state(thread,
		    ARM_NEON_STATE,
		    (thread_state_t)&ic->ns,
		    ARM_NEON_STATE_COUNT);
	}
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	kfree_type(struct arm_act_context, ic);
}

/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	kfree_type(struct arm_act_context, ctx);
}

kern_return_t
thread_set_wq_state32(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state32 *saved_state_32;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(!thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_32 = saved_state32(saved_state);

	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state_32->cpsr = PSR64_USER32_DEFAULT;
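	/*
	 * CPSR is forced to the default user value rather than taken from
	 * the caller, so a workqueue thread always resumes in a well-formed
	 * user mode regardless of what the supplied state contained.
	 */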

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}

kern_return_t
thread_set_wq_state64(thread_t thread,
    thread_state_t tstate)
{
	arm_thread_state64_t *state;
	struct arm_saved_state *saved_state;
	struct arm_saved_state64 *saved_state_64;
	thread_t curth = current_thread();
	spl_t s = 0;

	assert(thread_is_64bit_data(thread));

	saved_state = thread->machine.upcb;
	saved_state_64 = saved_state64(saved_state);
	state = (arm_thread_state64_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state64_to_saved_state(state, saved_state);
	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}