xref: /xnu-8020.140.41/osfmk/arm64/status.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/misc_protos.h>
37 #include <arm64/proc_reg.h>
38 #include <sys/random.h>
39 #if __has_feature(ptrauth_calls)
40 #include <ptrauth.h>
41 #endif
42 
43 #include <libkern/coreanalytics/coreanalytics.h>
44 
45 
/*
 * Legacy VFPv2 floating-point register file layout: thirty-two 32-bit
 * registers followed by the FPSCR status/control word.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t in 32-bit words, as Mach state counts are
 * expressed in natural-sized units. */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
55 
56 /*
57  * Forward definitions
58  */
59 void thread_set_child(thread_t child, int pid);
60 void thread_set_parent(thread_t parent, int pid);
61 static void free_debug_state(thread_t thread);
62 user_addr_t thread_get_sigreturn_token(thread_t thread);
63 uint32_t thread_get_sigreturn_diversifier(thread_t thread);
64 
65 /*
66  * Maps state flavor to number of words in the state:
67  */
68 /* __private_extern__ */
/* Designated initializers: any flavor not listed here reads as 0. */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
83 
84 extern zone_t ads_zone;
85 
86 #if __arm64__
87 /*
88  * Copy values from saved_state to ts64.
89  */
90 void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,arm_thread_state64_t * ts64)91 saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
92     arm_thread_state64_t *    ts64)
93 {
94 	uint32_t i;
95 
96 	assert(is_saved_state64(saved_state));
97 
98 	ts64->fp = get_saved_state_fp(saved_state);
99 	ts64->lr = get_saved_state_lr(saved_state);
100 	ts64->sp = get_saved_state_sp(saved_state);
101 	ts64->pc = get_saved_state_pc(saved_state);
102 	ts64->cpsr = get_saved_state_cpsr(saved_state);
103 	for (i = 0; i < 29; i++) {
104 		ts64->x[i] = get_saved_state_reg(saved_state, i);
105 	}
106 }
107 
/*
 * Copy values from ts64 to saved_state.
 *
 * For safety, CPSR is sanitized as follows:
 *
 * - ts64->cpsr.{N,Z,C,V} are copied as-is into saved_state->cpsr
 * - ts64->cpsr.M is ignored, and saved_state->cpsr.M is reset to EL0
 * - All other saved_state->cpsr bits are preserved as-is
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t *          saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	/* Mask interrupts while the signed thread state is being rewritten. */
	uint64_t intr = ml_pac_safe_interrupts_disable();
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

	/* CPSR bit partitioning implementing the sanitization above. */
	const uint32_t CPSR_COPY_MASK = PSR64_USER_MASK;
	const uint32_t CPSR_ZERO_MASK = PSR64_MODE_MASK;
	const uint32_t CPSR_PRESERVE_MASK = ~(CPSR_COPY_MASK | CPSR_ZERO_MASK);
#if __has_feature(ptrauth_calls)
	/*
	 * With ptrauth, the CPSR store goes through the signed-thread-state
	 * primitive: preserve the non-user bits already in w2 (loaded by the
	 * macro), merge in the user-mask bits of ts64->cpsr, and store the
	 * result at SS64_CPSR.
	 */
	/* BEGIN IGNORE CODESTYLE */
	MANIPULATE_SIGNED_THREAD_STATE(saved_state,
		"and	w2, w2, %w[preserve_mask]"	"\n"
		"mov	w6, %w[cpsr]"			"\n"
		"and	w6, w6, %w[copy_mask]"		"\n"
		"orr	w2, w2, w6"			"\n"
		"str	w2, [x0, %[SS64_CPSR]]"		"\n",
		[cpsr] "r"(ts64->cpsr),
		[preserve_mask] "i"(CPSR_PRESERVE_MASK),
		[copy_mask] "i"(CPSR_COPY_MASK)
	);
	/* END IGNORE CODESTYLE */
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#else
	/* No ptrauth: sanitize CPSR with plain loads/stores. */
	uint32_t new_cpsr = get_saved_state_cpsr(saved_state);
	new_cpsr &= CPSR_PRESERVE_MASK;
	new_cpsr |= (ts64->cpsr & CPSR_COPY_MASK);
	set_saved_state_cpsr(saved_state, new_cpsr);
#endif /* __has_feature(ptrauth_calls) */
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_pac_safe_interrupts_restore(intr);
#endif /* __has_feature(ptrauth_calls) */
}
167 
168 #endif /* __arm64__ */
169 
170 static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)171 handle_get_arm32_thread_state(thread_state_t            tstate,
172     mach_msg_type_number_t *  count,
173     const arm_saved_state_t * saved_state)
174 {
175 	if (*count < ARM_THREAD_STATE32_COUNT) {
176 		return KERN_INVALID_ARGUMENT;
177 	}
178 	if (!is_saved_state32(saved_state)) {
179 		return KERN_INVALID_ARGUMENT;
180 	}
181 
182 	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
183 	*count = ARM_THREAD_STATE32_COUNT;
184 	return KERN_SUCCESS;
185 }
186 
187 static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)188 handle_get_arm64_thread_state(thread_state_t            tstate,
189     mach_msg_type_number_t *  count,
190     const arm_saved_state_t * saved_state)
191 {
192 	if (*count < ARM_THREAD_STATE64_COUNT) {
193 		return KERN_INVALID_ARGUMENT;
194 	}
195 	if (!is_saved_state64(saved_state)) {
196 		return KERN_INVALID_ARGUMENT;
197 	}
198 
199 	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
200 	*count = ARM_THREAD_STATE64_COUNT;
201 	return KERN_SUCCESS;
202 }
203 
204 
/*
 * Retrieve a thread's register state via the unified ARM_THREAD_STATE
 * flavor.  Writes either a legacy 32-bit-only layout or the unified
 * 32/64-bit structure, depending on the caller-supplied count.
 */
static kern_return_t
handle_get_arm_thread_state(thread_state_t            tstate,
    mach_msg_type_number_t *  count,
    const arm_saved_state_t * saved_state)
{
	/* In an arm64 world, this flavor can be used to retrieve the thread
	 * state of a 32-bit or 64-bit thread into a unified structure, but we
	 * need to support legacy clients who are only aware of 32-bit, so
	 * check the count to see what the client is expecting.
	 */
	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		return handle_get_arm32_thread_state(tstate, count, saved_state);
	}

	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
	/* Zero first so any bytes beyond the populated sub-state read as 0. */
	bzero(unified_state, sizeof(*unified_state));
#if __arm64__
	if (is_saved_state64(saved_state)) {
		unified_state->ash.flavor = ARM_THREAD_STATE64;
		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
	} else
#endif
	{
		unified_state->ash.flavor = ARM_THREAD_STATE32;
		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
	}
	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
	return KERN_SUCCESS;
}
236 
237 
238 static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)239 handle_set_arm32_thread_state(const thread_state_t   tstate,
240     mach_msg_type_number_t count,
241     arm_saved_state_t *    saved_state)
242 {
243 	if (count != ARM_THREAD_STATE32_COUNT) {
244 		return KERN_INVALID_ARGUMENT;
245 	}
246 
247 	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
248 	return KERN_SUCCESS;
249 }
250 
251 static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)252 handle_set_arm64_thread_state(const thread_state_t   tstate,
253     mach_msg_type_number_t count,
254     arm_saved_state_t *    saved_state)
255 {
256 	if (count != ARM_THREAD_STATE64_COUNT) {
257 		return KERN_INVALID_ARGUMENT;
258 	}
259 
260 	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
261 	return KERN_SUCCESS;
262 }
263 
264 
/*
 * Set a thread's register state via the unified ARM_THREAD_STATE
 * flavor, accepting either a legacy 32-bit-only layout or the unified
 * 32/64-bit structure.  The buffer's declared width must match the
 * width of the target's saved state.
 */
static kern_return_t
handle_set_arm_thread_state(const thread_state_t   tstate,
    mach_msg_type_number_t count,
    arm_saved_state_t *    saved_state)
{
	/* In an arm64 world, this flavor can be used to set the thread state of a
	 * 32-bit or 64-bit thread from a unified structure, but we need to support
	 * legacy clients who are only aware of 32-bit, so check the count to see
	 * what the client is expecting.
	 */
	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		return handle_set_arm32_thread_state(tstate, count, saved_state);
	}

	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
#if __arm64__
	if (is_thread_state64(unified_state)) {
		if (!is_saved_state64(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
	} else
#endif
	{
		if (!is_saved_state32(saved_state)) {
			return KERN_INVALID_ARGUMENT;
		}
		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
	}

	return KERN_SUCCESS;
}
300 
301 
302 #if __has_feature(ptrauth_calls)
303 
304 static inline uint32_t
thread_generate_sigreturn_token(void * ptr,thread_t thread)305 thread_generate_sigreturn_token(
306 	void *ptr,
307 	thread_t thread)
308 {
309 	user64_addr_t token = (user64_addr_t)ptr;
310 	token ^= (user64_addr_t)thread_get_sigreturn_token(thread);
311 	token = (user64_addr_t)pmap_sign_user_ptr((void*)token,
312 	    ptrauth_key_process_independent_data, ptrauth_string_discriminator("nonce"),
313 	    thread->machine.jop_pid);
314 	token >>= 32;
315 	return (uint32_t)token;
316 }
317 #endif //__has_feature(ptrauth_calls)
318 
/*
 * Translate thread state arguments to userspace representation
 *
 * On ptrauth-enabled builds this signs ts64->pc/lr/sp/fp with user
 * keys (via pmap_sign_user_ptr) and records signing metadata in
 * ts64->flags: the kernel-signed pc/lr bits, an optional userland
 * diversifier, and optional sigreturn tokens.  On other builds it is
 * a no-op.  Always returns KERN_SUCCESS unless noted below.
 */
kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	thread_set_status_flags_t tssf_flags)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;
	/* Decode the caller-requested behaviors up front. */
	bool preserve_flags = !!(tssf_flags & TSSF_PRESERVE_FLAGS);
	bool stash_sigreturn_token = !!(tssf_flags & TSSF_STASH_SIGRETURN_TOKEN);
	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);
	uint32_t old_flags;
	bool kernel_signed_pc = true;
	bool kernel_signed_lr = true;
	uint32_t userland_diversifier = 0;

	/*
	 * Locate the 64-bit thread state within the supplied buffer; any
	 * flavor/count combination without 64-bit state needs no
	 * conversion and is passed through untouched.
	 */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread()) ||
	    thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
	    ) {
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	/* Rebuild flags from scratch; old_flags is consulted below when the
	 * caller asked to preserve the previously issued metadata. */
	old_flags = ts64->flags;
	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (preserve_flags) {
		assert(random_div == false);
		assert(thread_div == false);

		/* Restore the diversifier and other opaque flags */
		ts64->flags |= (old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
		userland_diversifier = old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC)) {
			kernel_signed_pc = false;
		}
		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR)) {
			kernel_signed_lr = false;
		}
	} else {
		/* Set a non zero userland diversifier */
		if (random_div) {
			do {
				read_random(&userland_diversifier, sizeof(userland_diversifier));
				userland_diversifier &=
				    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
			} while (userland_diversifier == 0);
		} else if (thread_div) {
			userland_diversifier = thread_get_sigreturn_diversifier(thread) &
			    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
		}
		ts64->flags |= userland_diversifier;
	}

	if (kernel_signed_pc) {
		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC;
	}

	if (kernel_signed_lr) {
		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR;
	}

	/* Sign pc/lr with the process-independent code key; non-kernel-signed
	 * values are additionally diversified with the userland diversifier. */
	if (ts64->pc) {
		uint64_t discriminator = ptrauth_string_discriminator("pc");
		if (!kernel_signed_pc && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
			    ptrauth_string_discriminator("pc"));
		}

		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		uint64_t discriminator = ptrauth_string_discriminator("lr");
		if (!kernel_signed_lr && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
			    ptrauth_string_discriminator("lr"));
		}

		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	/* Stash the sigreturn token */
	if (stash_sigreturn_token) {
		if (kernel_signed_pc) {
			uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK);
		}

		if (kernel_signed_lr) {
			uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK);
		}
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count; (void)tssf_flags;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
483 
484 #if __has_feature(ptrauth_calls)
485 extern char *   proc_name_address(void *p);
486 
487 CA_EVENT(pac_thread_state_exception_event,
488     CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
489 
490 static void
machine_thread_state_check_pac_state(arm_thread_state64_t * ts64,arm_thread_state64_t * old_ts64)491 machine_thread_state_check_pac_state(
492 	arm_thread_state64_t *ts64,
493 	arm_thread_state64_t *old_ts64)
494 {
495 	bool send_event = false;
496 	task_t task = current_task();
497 	void *proc = task->bsd_info;
498 	char *proc_name = (char *) "unknown";
499 
500 	if (((ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC) &&
501 	    ts64->pc != old_ts64->pc) || (!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
502 	    (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR) && (ts64->lr != old_ts64->lr ||
503 	    (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)))) {
504 		send_event = true;
505 	}
506 
507 	if (!send_event) {
508 		return;
509 	}
510 
511 	proc_name = proc_name_address(proc);
512 	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_exception_event);
513 	CA_EVENT_TYPE(pac_thread_state_exception_event) * pexc_event = ca_event->data;
514 	strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
515 	CA_EVENT_SEND(ca_event);
516 }
517 
518 CA_EVENT(pac_thread_state_sigreturn_event,
519     CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
520 
521 static bool
machine_thread_state_check_sigreturn_token(arm_thread_state64_t * ts64,thread_t thread)522 machine_thread_state_check_sigreturn_token(
523 	arm_thread_state64_t *ts64,
524 	thread_t thread)
525 {
526 	task_t task = current_task();
527 	void *proc = task->bsd_info;
528 	char *proc_name = (char *) "unknown";
529 	bool token_matched = true;
530 	bool kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
531 	bool kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);
532 
533 	if (kernel_signed_pc) {
534 		/* Compute the sigreturn token */
535 		uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
536 		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
537 		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK)) {
538 			token_matched = false;
539 		}
540 	}
541 
542 	if (kernel_signed_lr) {
543 		/* Compute the sigreturn token */
544 		uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
545 		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
546 		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK)) {
547 			token_matched = false;
548 		}
549 	}
550 
551 	if (token_matched) {
552 		return true;
553 	}
554 
555 	proc_name = proc_name_address(proc);
556 	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_sigreturn_event);
557 	CA_EVENT_TYPE(pac_thread_state_sigreturn_event) * psig_event = ca_event->data;
558 	strlcpy(psig_event->proc_name, proc_name, CA_PROCNAME_LEN);
559 	CA_EVENT_SEND(ca_event);
560 	return false;
561 }
562 
563 #endif
564 
/*
 * Translate thread state arguments from userspace representation
 *
 * On ptrauth-enabled builds this authenticates the user-signed
 * ts64->pc/lr/sp/fp values (via pmap_auth_user_ptr) after vetting the
 * caller-supplied flags, optionally cross-checking against the old
 * thread state and the sigreturn tokens.  Returns
 * KERN_PROTECTION_FAILURE when a JOP-disabled caller targets a
 * JOP-enabled thread or a required sigreturn token does not match;
 * KERN_SUCCESS otherwise.
 */
kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count,
	thread_state_t old_tstate,
	mach_msg_type_number_t old_count,
	thread_set_status_flags_t tssf_flags)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;
	arm_thread_state64_t *old_ts64 = NULL;
	void *userland_diversifier = NULL;
	bool kernel_signed_pc;
	bool kernel_signed_lr;
	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);

	/*
	 * Locate the 64-bit thread state in the new (and, when provided,
	 * old) buffers; flavors without 64-bit state need no conversion.
	 */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);

		arm_unified_thread_state_t *old_unified_state = (arm_unified_thread_state_t *)old_tstate;
		if (old_unified_state && old_count >= ARM_UNIFIED_THREAD_STATE_COUNT) {
			old_ts64 = thread_state64(old_unified_state);
		}
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;

		if (old_count == ARM_THREAD_STATE64_COUNT) {
			old_ts64 = (arm_thread_state64_t *)old_tstate;
		}
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)) {
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if (thread->machine.disable_user_jop || !thread_is_64bit_addr(thread)
		    ) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
	kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);
	/*
	 * Replace pc/lr with old state if allow only
	 * user ptr flag is passed and ptrs are marked
	 * kernel signed.
	 */
	if ((tssf_flags & TSSF_CHECK_USER_FLAGS) &&
	    (kernel_signed_pc || kernel_signed_lr)) {
		if (old_ts64 && old_count == count) {
			/* Send a CA event if the thread state does not match */
			machine_thread_state_check_pac_state(ts64, old_ts64);

			/* Check if user ptrs needs to be replaced */
			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
			    kernel_signed_pc) {
				ts64->pc = old_ts64->pc;
			}

			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
			    !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
			    kernel_signed_lr) {
				/* Carry the old lr's IB-signed provenance along with its value. */
				ts64->lr = old_ts64->lr;
				if (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
					ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
				} else {
					ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
				}
			}
		}
	}

	/* Validate sigreturn token */
	if (tssf_flags & TSSF_CHECK_SIGRETURN_TOKEN) {
		bool token_matched = machine_thread_state_check_sigreturn_token(ts64, thread);
		if ((tssf_flags & TSSF_ALLOW_ONLY_MATCHING_TOKEN) && !token_matched) {
			return KERN_PROTECTION_FAILURE;
		}
	}

	/* Get the userland diversifier */
	if (random_div && old_ts64 && old_count == count) {
		/* Get the random diversifier from the old thread state */
		userland_diversifier = (void *)(long)(old_ts64->flags &
		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
	} else if (thread_div) {
		userland_diversifier = (void *)(long)(thread_get_sigreturn_diversifier(thread) &
		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
	}

	/* Authenticate user-signed pointers; non-kernel-signed pc/lr use the
	 * userland-diversified discriminator they were signed with above. */
	if (ts64->pc) {
		uint64_t discriminator = ptrauth_string_discriminator("pc");
		if (!kernel_signed_pc && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator(userland_diversifier,
			    ptrauth_string_discriminator("pc"));
		}
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		uint64_t discriminator = ptrauth_string_discriminator("lr");
		if (!kernel_signed_lr && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator(userland_diversifier,
			    ptrauth_string_discriminator("lr"));
		}
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	(void)old_tstate; (void)old_count; (void)tssf_flags;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
748 
749 /*
750  * Translate signal context data pointer to userspace representation
751  */
752 
753 kern_return_t
machine_thread_siguctx_pointer_convert_to_user(thread_t thread,user_addr_t * uctxp)754 machine_thread_siguctx_pointer_convert_to_user(
755 	thread_t thread,
756 	user_addr_t *uctxp)
757 {
758 #if __has_feature(ptrauth_calls)
759 	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
760 		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
761 		return KERN_SUCCESS;
762 	}
763 
764 	if (arm_user_jop_disabled()) {
765 		return KERN_SUCCESS;
766 	}
767 
768 	if (*uctxp) {
769 		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
770 		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
771 		    thread->machine.jop_pid);
772 	}
773 
774 	return KERN_SUCCESS;
775 #else
776 	// No conversion to userspace representation on this platform
777 	(void)thread; (void)uctxp;
778 	return KERN_SUCCESS;
779 #endif /* __has_feature(ptrauth_calls) */
780 }
781 
782 /*
783  * Translate array of function pointer syscall arguments from userspace representation
784  */
785 
786 kern_return_t
machine_thread_function_pointers_convert_from_user(thread_t thread,user_addr_t * fptrs,uint32_t count)787 machine_thread_function_pointers_convert_from_user(
788 	thread_t thread,
789 	user_addr_t *fptrs,
790 	uint32_t count)
791 {
792 #if __has_feature(ptrauth_calls)
793 	if (current_thread()->machine.disable_user_jop || !thread_is_64bit_addr(current_thread())) {
794 		assert(thread->machine.disable_user_jop || !thread_is_64bit_addr(thread));
795 		return KERN_SUCCESS;
796 	}
797 
798 	if (arm_user_jop_disabled()) {
799 		return KERN_SUCCESS;
800 	}
801 
802 	while (count--) {
803 		if (*fptrs) {
804 			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
805 			    ptrauth_key_function_pointer, 0, thread->machine.jop_pid);
806 		}
807 		fptrs++;
808 	}
809 
810 	return KERN_SUCCESS;
811 #else
812 	// No conversion from userspace representation on this platform
813 	(void)thread; (void)fptrs; (void)count;
814 	return KERN_SUCCESS;
815 #endif /* __has_feature(ptrauth_calls) */
816 }
817 
818 /*
819  * Routine: machine_thread_get_state
820  *
821  */
/*
 * Copy out one flavor of user register state for 'thread' into 'tstate'.
 *
 * On entry *count is the capacity of 'tstate' in 32-bit words; on success it
 * is updated to the number of words written.  Flavors that exist only for one
 * user data width (e.g. ARM_THREAD_STATE64) return KERN_INVALID_ARGUMENT when
 * the thread's data width does not match.
 */
kern_return_t
machine_thread_get_state(thread_t                 thread,
    thread_flavor_t          flavor,
    thread_state_t           tstate,
    mach_msg_type_number_t * count)
{
	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Legacy flavor list: always reports the 32-bit flavor names. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_NEW:
		/* Newer list: exception/debug flavors depend on the data width. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* macOS 10.15 list: adds ARM_PAGEIN_STATE to the previous set. */
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:
	{
		/* Unified flavor: the handler picks 32- vs 64-bit from the PCB. */
		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
	case ARM_THREAD_STATE32:
	{
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
	}
#if __arm64__
	case ARM_THREAD_STATE64:
	{
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		const arm_saved_state_t *current_state = thread->machine.upcb;

		kern_return_t rn = handle_get_arm64_thread_state(tstate, count,
		    current_state);
		if (rn) {
			return rn;
		}

		break;
	}
#endif
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state32 *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = saved_state32(thread->machine.upcb);

		/* The 32-bit view reports the saved ESR through the legacy 'fsr' field. */
		state->exception = saved_state->exception;
		state->fsr = saved_state->esr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_EXCEPTION_STATE64:{
		struct arm_exception_state64 *state;
		struct arm_saved_state64 *saved_state;

		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state64 *) tstate;
		saved_state = saved_state64(thread->machine.upcb);

		state->exception = saved_state->exception;
		state->far = saved_state->far;
		state->esr = saved_state->esr;

		*count = ARM_EXCEPTION_STATE64_COUNT;
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_legacy_debug_state_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;
		thread_state = find_debug_state32(thread);

		/* No lazily-allocated debug state yet means "all registers zero". */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_legacy_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
		}

		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
		break;
	}
	case ARM_DEBUG_STATE32:{
		arm_debug_state32_t *state;
		arm_debug_state32_t *thread_state;

		if (*count < ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;
		thread_state = find_debug_state32(thread);

		/* NULL debug state reads back as zeros. */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state32_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
		}

		*count = ARM_DEBUG_STATE32_COUNT;
		break;
	}

	case ARM_DEBUG_STATE64:{
		arm_debug_state64_t *state;
		arm_debug_state64_t *thread_state;

		if (*count < ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;
		thread_state = find_debug_state64(thread);

		/* NULL debug state reads back as zeros. */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state64_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
		}

		*count = ARM_DEBUG_STATE64_COUNT;
		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int max;

		/*
		 * Accept either the full VFP buffer or the smaller VFPv2 one;
		 * 'max' is the number of 32-bit registers copied (plus one
		 * trailing word for the status/control register).
		 */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count =  ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */

		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
		*count = (max + 1);
		break;
	}
	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (*count < ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		/* Saved and exported layouts are identical; copy wholesale. */
		assert(sizeof(*thread_state) == sizeof(*state));
		bcopy(thread_state, state, sizeof(arm_neon_state_t));

		*count = ARM_NEON_STATE_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (*count < ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		/* For now, these are identical */
		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(thread_state, state, sizeof(arm_neon_state64_t));


		*count = ARM_NEON_STATE64_COUNT;
		break;
	}


	case ARM_PAGEIN_STATE: {
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Report the error code from the thread's last page-in. */
		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
1114 
1115 
1116 /*
1117  * Routine: machine_thread_get_kern_state
1118  *
1119  */
1120 kern_return_t
machine_thread_get_kern_state(thread_t thread,thread_flavor_t flavor,thread_state_t tstate,mach_msg_type_number_t * count)1121 machine_thread_get_kern_state(thread_t                 thread,
1122     thread_flavor_t          flavor,
1123     thread_state_t           tstate,
1124     mach_msg_type_number_t * count)
1125 {
1126 	/*
1127 	 * This works only for an interrupted kernel thread
1128 	 */
1129 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1130 		return KERN_FAILURE;
1131 	}
1132 
1133 	switch (flavor) {
1134 	case ARM_THREAD_STATE:
1135 	{
1136 		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1137 		if (rn) {
1138 			return rn;
1139 		}
1140 		break;
1141 	}
1142 	case ARM_THREAD_STATE32:
1143 	{
1144 		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1145 		if (rn) {
1146 			return rn;
1147 		}
1148 		break;
1149 	}
1150 #if __arm64__
1151 	case ARM_THREAD_STATE64:
1152 	{
1153 		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1154 		if (rn) {
1155 			return rn;
1156 		}
1157 		break;
1158 	}
1159 #endif
1160 	default:
1161 		return KERN_INVALID_ARGUMENT;
1162 	}
1163 	return KERN_SUCCESS;
1164 }
1165 
1166 void
machine_thread_switch_addrmode(thread_t thread)1167 machine_thread_switch_addrmode(thread_t thread)
1168 {
1169 	if (task_has_64Bit_data(get_threadtask(thread))) {
1170 		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
1171 		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
1172 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1173 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1174 
1175 		/*
1176 		 * Reinitialize the NEON state.
1177 		 */
1178 		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1179 		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
1180 	} else {
1181 		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
1182 		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
1183 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1184 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1185 
1186 		/*
1187 		 * Reinitialize the NEON state.
1188 		 */
1189 		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1190 		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
1191 	}
1192 }
1193 
1194 extern long long arm_debug_get(void);
1195 
1196 /*
1197  * Routine: machine_thread_set_state
1198  *
1199  */
/*
 * Install one flavor of user register state on 'thread' from 'tstate'
 * ('count' is the buffer size in 32-bit words).  Debug flavors sanitize the
 * control registers so userspace can never program linked/context-ID
 * breakpoints, non-IVA match types, or non-user privilege levels.
 */
kern_return_t
machine_thread_set_state(thread_t               thread,
    thread_flavor_t        flavor,
    thread_state_t         tstate,
    mach_msg_type_number_t count)
{
	kern_return_t rn;

	switch (flavor) {
	case ARM_THREAD_STATE:
		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

	case ARM_THREAD_STATE32:
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;

#if __arm64__
	case ARM_THREAD_STATE64:
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}


		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
		if (rn) {
			return rn;
		}
		break;
#endif
	case ARM_EXCEPTION_STATE:{
		/* Exception state is read-only; only validate the arguments. */
		if (count != ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_EXCEPTION_STATE64:{
		/* Exception state is read-only; only validate the arguments. */
		if (count != ARM_EXCEPTION_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		break;
	}
	case ARM_DEBUG_STATE:
	{
		arm_legacy_debug_state_t *state;
		boolean_t enabled = FALSE;
		unsigned int    i;

		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_legacy_debug_state_t *) tstate;

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		/* Nothing enabled: release any previously-allocated debug state. */
		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}

			thread_state->mdscr_el1 = 0ULL;         // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
		}

		/* Load the new values into the debug hardware immediately if live. */
		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}
	case ARM_DEBUG_STATE32:
		/* ARM64_TODO  subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
	{
		arm_debug_state32_t *state;
		boolean_t enabled = FALSE;
		unsigned int    i;

		if (count != ARM_DEBUG_STATE32_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state32_t *) tstate;

		/* Single-step alone counts as "debug state in use". */
		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			/* Only the single-step bit of MDSCR_EL1 is user-controllable. */
			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGBCR_MATCH_MASK
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set32(thread->machine.DebugData);
		}

		break;
	}

	case ARM_DEBUG_STATE64:
	{
		arm_debug_state64_t *state;
		boolean_t enabled = FALSE;
		unsigned int i;

		if (count != ARM_DEBUG_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}
		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state64_t *) tstate;

		/* Single-step alone counts as "debug state in use". */
		if (state->mdscr_el1 & MDSCR_SS) {
			enabled = TRUE;
		}

		for (i = 0; i < 16; i++) {
			/* do not allow context IDs to be set */
			/* (wcr type bits are not checked here: watchpoint type is ignored in AArch64 state) */
			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
				return KERN_PROTECTION_FAILURE;
			}
			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
				enabled = TRUE;
			}
		}

		if (!enabled) {
			free_debug_state(thread);
		} else {
			arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);

			if (thread_state == NULL) {
				return KERN_FAILURE;
			}

			/* Only the single-step bit of MDSCR_EL1 is user-controllable. */
			if (state->mdscr_el1 & MDSCR_SS) {
				thread_state->mdscr_el1 |= MDSCR_SS;
			} else {
				thread_state->mdscr_el1 &= ~MDSCR_SS;
			}

			for (i = 0; i < 16; i++) {
				/* set appropriate privilege; mask out unknown bits */
				thread_state->bcr[i] = (state->bcr[i] & (0         /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
				    | 0                             /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBGBCR_TYPE_IVA
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
				    | ARM_DBG_CR_ENABLE_MASK))
				    | ARM_DBG_CR_LINKED_UNLINKED
				    | ARM_DBG_CR_SECURITY_STATE_BOTH
				    | ARM_DBG_CR_MODE_CONTROL_USER;
				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
			}
		}

		if (thread == current_thread()) {
			arm_debug_set64(thread->machine.DebugData);
		}

		break;
	}

	case ARM_VFP_STATE:{
		struct arm_vfp_state *state;
		arm_neon_saved_state32_t *thread_state;
		unsigned int    max;

		/* Accept the full VFP buffer or the smaller VFPv2 variant. */
		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		thread_state = neon_state32(thread->machine.uNeon);
		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */

		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE:{
		arm_neon_state_t *state;
		arm_neon_saved_state32_t *thread_state;

		if (count != ARM_NEON_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state_t *)tstate;
		thread_state = neon_state32(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state_t));

		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
		break;
	}

	case ARM_NEON_STATE64:{
		arm_neon_state64_t *state;
		arm_neon_saved_state64_t *thread_state;

		if (count != ARM_NEON_STATE64_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		if (!thread_is_64bit_data(thread)) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_neon_state64_t *)tstate;
		thread_state = neon_state64(thread->machine.uNeon);

		assert(sizeof(*state) == sizeof(*thread_state));
		bcopy(state, thread_state, sizeof(arm_neon_state64_t));


		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
		break;
	}


	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
1561 
1562 mach_vm_address_t
machine_thread_pc(thread_t thread)1563 machine_thread_pc(thread_t thread)
1564 {
1565 	struct arm_saved_state *ss = get_user_regs(thread);
1566 	return (mach_vm_address_t)get_saved_state_pc(ss);
1567 }
1568 
1569 void
machine_thread_reset_pc(thread_t thread,mach_vm_address_t pc)1570 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1571 {
1572 	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
1573 }
1574 
1575 /*
1576  * Routine: machine_thread_state_initialize
1577  *
1578  */
/*
 * Zero-initialize a new thread's saved integer and NEON state, set the
 * default FPCR and CPSR, clear any debug state, and (with pointer auth)
 * sign the freshly-initialized user state.
 */
void
machine_thread_state_initialize(thread_t thread)
{
	arm_context_t *context = thread->machine.contextData;

	/*
	 * Should always be set up later. For a kernel thread, we don't care
	 * about this state. For a user thread, we'll set the state up in
	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
	 */

	if (context != NULL) {
		bzero(&context->ss.uss, sizeof(context->ss.uss));
		bzero(&context->ns.uns, sizeof(context->ns.uns));

		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
			context->ns.ns_64.fpcr = FPCR_DEFAULT;
		} else {
			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
		}
		/* Default to 64-bit user mode CPSR until the loader says otherwise. */
		context->ss.ss_64.cpsr = PSR64_USER64_DEFAULT;
	}

	/* Debug state is lazily allocated on first use; start with none. */
	thread->machine.DebugData = NULL;

#if defined(HAS_APPLE_PAC)
	/* Sign the initial user-space thread state */
	if (thread->machine.upcb != NULL) {
		/*
		 * Hand-rolled call into ml_sign_thread_state: arguments are
		 * marshalled into x0-x5, LR is preserved in x6 across the call,
		 * and the call runs on SP1 (SPSel toggled around it) —
		 * NOTE(review): presumably to match ml_sign_thread_state's
		 * register/stack contract; keep this asm in sync with it.
		 */
		uint64_t intr = ml_pac_safe_interrupts_disable();
		asm volatile (
                        "mov	x0, %[iss]"             "\n"
                        "mov	x1, #0"                 "\n"
                        "mov	w2, %w[usr]"            "\n"
                        "mov	x3, #0"                 "\n"
                        "mov	x4, #0"                 "\n"
                        "mov	x5, #0"                 "\n"
                        "mov	x6, lr"                 "\n"
                        "msr	SPSel, #1"              "\n"
                        "bl     _ml_sign_thread_state"  "\n"
                        "msr	SPSel, #0"              "\n"
                        "mov	lr, x6"                 "\n"
                        :
                        : [iss] "r"(thread->machine.upcb), [usr] "r"(thread->machine.upcb->ss_64.cpsr)
                        : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
                );
		ml_pac_safe_interrupts_restore(intr);
	}
#endif /* defined(HAS_APPLE_PAC) */
}
1628 
1629 /*
1630  * Routine: machine_thread_dup
1631  *
1632  */
1633 kern_return_t
machine_thread_dup(thread_t self,thread_t target,__unused boolean_t is_corpse)1634 machine_thread_dup(thread_t self,
1635     thread_t target,
1636     __unused boolean_t is_corpse)
1637 {
1638 	struct arm_saved_state *self_saved_state;
1639 	struct arm_saved_state *target_saved_state;
1640 
1641 	target->machine.cthread_self = self->machine.cthread_self;
1642 
1643 	self_saved_state = self->machine.upcb;
1644 	target_saved_state = target->machine.upcb;
1645 	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
1646 #if defined(HAS_APPLE_PAC)
1647 	if (!is_corpse && is_saved_state64(self_saved_state)) {
1648 		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
1649 	}
1650 #endif /* defined(HAS_APPLE_PAC) */
1651 
1652 	arm_neon_saved_state_t *self_neon_state = self->machine.uNeon;
1653 	arm_neon_saved_state_t *target_neon_state = target->machine.uNeon;
1654 	bcopy(self_neon_state, target_neon_state, sizeof(*target_neon_state));
1655 
1656 	return KERN_SUCCESS;
1657 }
1658 
1659 /*
1660  * Routine: get_user_regs
1661  *
1662  */
1663 struct arm_saved_state *
get_user_regs(thread_t thread)1664 get_user_regs(thread_t thread)
1665 {
1666 	return thread->machine.upcb;
1667 }
1668 
1669 arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)1670 get_user_neon_regs(thread_t thread)
1671 {
1672 	return thread->machine.uNeon;
1673 }
1674 
1675 /*
1676  * Routine: find_user_regs
1677  *
1678  */
1679 struct arm_saved_state *
find_user_regs(thread_t thread)1680 find_user_regs(thread_t thread)
1681 {
1682 	return thread->machine.upcb;
1683 }
1684 
1685 /*
1686  * Routine: find_kern_regs
1687  *
1688  */
1689 struct arm_saved_state *
find_kern_regs(thread_t thread)1690 find_kern_regs(thread_t thread)
1691 {
1692 	/*
1693 	 * This works only for an interrupted kernel thread
1694 	 */
1695 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1696 		return (struct arm_saved_state *) NULL;
1697 	} else {
1698 		return getCpuDatap()->cpu_int_state;
1699 	}
1700 }
1701 
1702 arm_debug_state32_t *
find_debug_state32(thread_t thread)1703 find_debug_state32(thread_t thread)
1704 {
1705 	if (thread && thread->machine.DebugData) {
1706 		return &(thread->machine.DebugData->uds.ds32);
1707 	} else {
1708 		return NULL;
1709 	}
1710 }
1711 
1712 arm_debug_state64_t *
find_debug_state64(thread_t thread)1713 find_debug_state64(thread_t thread)
1714 {
1715 	if (thread && thread->machine.DebugData) {
1716 		return &(thread->machine.DebugData->uds.ds64);
1717 	} else {
1718 		return NULL;
1719 	}
1720 }
1721 
1722 os_refgrp_decl(static, dbg_refgrp, "arm_debug_state", NULL);
1723 
1724 /**
1725  *  Finds the debug state for the given 64 bit thread, allocating one if it
1726  *  does not exist.
1727  *
1728  *  @param thread 64 bit thread to find or allocate debug state for
1729  *
1730  *  @returns A pointer to the given thread's 64 bit debug state or a null
1731  *           pointer if the given thread is null or the allocation of a new
1732  *           debug state fails.
1733  */
1734 arm_debug_state64_t *
find_or_allocate_debug_state64(thread_t thread)1735 find_or_allocate_debug_state64(thread_t thread)
1736 {
1737 	arm_debug_state64_t *thread_state = find_debug_state64(thread);
1738 	if (thread != NULL && thread_state == NULL) {
1739 		thread->machine.DebugData = zalloc_flags(ads_zone,
1740 		    Z_WAITOK | Z_NOFAIL);
1741 		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1742 		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1743 		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1744 		os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1745 		thread_state = find_debug_state64(thread);
1746 	}
1747 	return thread_state;
1748 }
1749 
1750 /**
1751  *  Finds the debug state for the given 32 bit thread, allocating one if it
1752  *  does not exist.
1753  *
1754  *  @param thread 32 bit thread to find or allocate debug state for
1755  *
1756  *  @returns A pointer to the given thread's 32 bit debug state or a null
1757  *           pointer if the given thread is null or the allocation of a new
1758  *           debug state fails.
1759  */
1760 arm_debug_state32_t *
find_or_allocate_debug_state32(thread_t thread)1761 find_or_allocate_debug_state32(thread_t thread)
1762 {
1763 	arm_debug_state32_t *thread_state = find_debug_state32(thread);
1764 	if (thread != NULL && thread_state == NULL) {
1765 		thread->machine.DebugData = zalloc_flags(ads_zone,
1766 		    Z_WAITOK | Z_NOFAIL);
1767 		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1768 		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1769 		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1770 		os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1771 		thread_state = find_debug_state32(thread);
1772 	}
1773 	return thread_state;
1774 }
1775 
1776 /**
1777  *	Frees a thread's debug state if allocated. Otherwise does nothing.
1778  *
1779  *  @param thread thread to free the debug state of
1780  */
1781 static inline void
free_debug_state(thread_t thread)1782 free_debug_state(thread_t thread)
1783 {
1784 	if (thread != NULL && thread->machine.DebugData != NULL) {
1785 		arm_debug_state_t *pTmp = thread->machine.DebugData;
1786 		thread->machine.DebugData = NULL;
1787 
1788 		if (os_ref_release(&pTmp->ref) == 0) {
1789 			zfree(ads_zone, pTmp);
1790 		}
1791 	}
1792 }
1793 
1794 /*
1795  * Routine: thread_userstack
1796  *
1797  */
1798 kern_return_t
thread_userstack(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * user_stack,int * customstack,boolean_t is_64bit_data)1799 thread_userstack(__unused thread_t  thread,
1800     int                flavor,
1801     thread_state_t     tstate,
1802     unsigned int       count,
1803     mach_vm_offset_t * user_stack,
1804     int *              customstack,
1805     boolean_t          is_64bit_data
1806     )
1807 {
1808 	register_t sp;
1809 
1810 	switch (flavor) {
1811 	case ARM_THREAD_STATE:
1812 		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1813 #if __arm64__
1814 			if (is_64bit_data) {
1815 				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1816 			} else
1817 #endif
1818 			{
1819 				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1820 			}
1821 
1822 			break;
1823 		}
1824 
1825 		/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1826 		OS_FALLTHROUGH;
1827 	case ARM_THREAD_STATE32:
1828 		if (count != ARM_THREAD_STATE32_COUNT) {
1829 			return KERN_INVALID_ARGUMENT;
1830 		}
1831 		if (is_64bit_data) {
1832 			return KERN_INVALID_ARGUMENT;
1833 		}
1834 
1835 		sp = ((arm_thread_state32_t *)tstate)->sp;
1836 		break;
1837 #if __arm64__
1838 	case ARM_THREAD_STATE64:
1839 		if (count != ARM_THREAD_STATE64_COUNT) {
1840 			return KERN_INVALID_ARGUMENT;
1841 		}
1842 		if (!is_64bit_data) {
1843 			return KERN_INVALID_ARGUMENT;
1844 		}
1845 
1846 		sp = ((arm_thread_state32_t *)tstate)->sp;
1847 		break;
1848 #endif
1849 	default:
1850 		return KERN_INVALID_ARGUMENT;
1851 	}
1852 
1853 	if (sp) {
1854 		*user_stack = CAST_USER_ADDR_T(sp);
1855 		if (customstack) {
1856 			*customstack = 1;
1857 		}
1858 	} else {
1859 		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
1860 		if (customstack) {
1861 			*customstack = 0;
1862 		}
1863 	}
1864 
1865 	return KERN_SUCCESS;
1866 }
1867 
1868 /*
1869  * thread_userstackdefault:
1870  *
1871  * Return the default stack location for the
1872  * thread, if otherwise unknown.
1873  */
1874 kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,boolean_t is64bit)1875 thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1876     boolean_t          is64bit)
1877 {
1878 	if (is64bit) {
1879 		*default_user_stack = USRSTACK64;
1880 	} else {
1881 		*default_user_stack = USRSTACK;
1882 	}
1883 
1884 	return KERN_SUCCESS;
1885 }
1886 
1887 /*
1888  * Routine: thread_setuserstack
1889  *
1890  */
1891 void
thread_setuserstack(thread_t thread,mach_vm_address_t user_stack)1892 thread_setuserstack(thread_t          thread,
1893     mach_vm_address_t user_stack)
1894 {
1895 	struct arm_saved_state *sv;
1896 
1897 	sv = get_user_regs(thread);
1898 
1899 	set_saved_state_sp(sv, user_stack);
1900 
1901 	return;
1902 }
1903 
1904 /*
1905  * Routine: thread_adjuserstack
1906  *
1907  */
1908 user_addr_t
thread_adjuserstack(thread_t thread,int adjust)1909 thread_adjuserstack(thread_t thread,
1910     int      adjust)
1911 {
1912 	struct arm_saved_state *sv;
1913 	uint64_t sp;
1914 
1915 	sv = get_user_regs(thread);
1916 
1917 	sp = get_saved_state_sp(sv);
1918 	sp += adjust;
1919 	set_saved_state_sp(sv, sp);
1920 
1921 	return sp;
1922 }
1923 
1924 
1925 /*
1926  * Routine: thread_setentrypoint
1927  *
1928  */
1929 void
thread_setentrypoint(thread_t thread,mach_vm_offset_t entry)1930 thread_setentrypoint(thread_t         thread,
1931     mach_vm_offset_t entry)
1932 {
1933 	struct arm_saved_state *sv;
1934 
1935 	sv = get_user_regs(thread);
1936 
1937 	set_saved_state_pc(sv, entry);
1938 
1939 	return;
1940 }
1941 
1942 /*
1943  * Routine: thread_entrypoint
1944  *
1945  */
1946 kern_return_t
thread_entrypoint(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * entry_point)1947 thread_entrypoint(__unused thread_t  thread,
1948     int                flavor,
1949     thread_state_t     tstate,
1950     unsigned int       count,
1951     mach_vm_offset_t * entry_point
1952     )
1953 {
1954 	switch (flavor) {
1955 	case ARM_THREAD_STATE:
1956 	{
1957 		struct arm_thread_state *state;
1958 
1959 		if (count != ARM_THREAD_STATE_COUNT) {
1960 			return KERN_INVALID_ARGUMENT;
1961 		}
1962 
1963 		state = (struct arm_thread_state *) tstate;
1964 
1965 		/*
1966 		 * If a valid entry point is specified, use it.
1967 		 */
1968 		if (state->pc) {
1969 			*entry_point = CAST_USER_ADDR_T(state->pc);
1970 		} else {
1971 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
1972 		}
1973 	}
1974 	break;
1975 
1976 	case ARM_THREAD_STATE64:
1977 	{
1978 		struct arm_thread_state64 *state;
1979 
1980 		if (count != ARM_THREAD_STATE64_COUNT) {
1981 			return KERN_INVALID_ARGUMENT;
1982 		}
1983 
1984 		state = (struct arm_thread_state64*) tstate;
1985 
1986 		/*
1987 		 * If a valid entry point is specified, use it.
1988 		 */
1989 		if (state->pc) {
1990 			*entry_point = CAST_USER_ADDR_T(state->pc);
1991 		} else {
1992 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
1993 		}
1994 
1995 		break;
1996 	}
1997 	default:
1998 		return KERN_INVALID_ARGUMENT;
1999 	}
2000 
2001 	return KERN_SUCCESS;
2002 }
2003 
2004 
2005 /*
2006  * Routine: thread_set_child
2007  *
2008  */
2009 void
thread_set_child(thread_t child,int pid)2010 thread_set_child(thread_t child,
2011     int      pid)
2012 {
2013 	struct arm_saved_state *child_state;
2014 
2015 	child_state = get_user_regs(child);
2016 
2017 	set_saved_state_reg(child_state, 0, pid);
2018 	set_saved_state_reg(child_state, 1, 1ULL);
2019 }
2020 
2021 
2022 /*
2023  * Routine: thread_set_parent
2024  *
2025  */
2026 void
thread_set_parent(thread_t parent,int pid)2027 thread_set_parent(thread_t parent,
2028     int      pid)
2029 {
2030 	struct arm_saved_state *parent_state;
2031 
2032 	parent_state = get_user_regs(parent);
2033 
2034 	set_saved_state_reg(parent_state, 0, pid);
2035 	set_saved_state_reg(parent_state, 1, 0);
2036 }
2037 
2038 
/*
 * Register-context snapshot allocated by act_thread_csave() and
 * consumed by act_thread_catt() / act_thread_cfree().
 */
struct arm_act_context {
	struct arm_unified_thread_state ss;	/* general-purpose thread state */
#if __ARM_VFP__
	struct arm_neon_saved_state ns;		/* NEON / floating-point state */
#endif
};
2045 
2046 /*
2047  * Routine: act_thread_csave
2048  *
2049  */
2050 void *
act_thread_csave(void)2051 act_thread_csave(void)
2052 {
2053 	struct arm_act_context *ic;
2054 	kern_return_t   kret;
2055 	unsigned int    val;
2056 	thread_t thread = current_thread();
2057 
2058 	ic = kalloc_type(struct arm_act_context, Z_WAITOK);
2059 	if (ic == (struct arm_act_context *) NULL) {
2060 		return (void *) 0;
2061 	}
2062 
2063 	val = ARM_UNIFIED_THREAD_STATE_COUNT;
2064 	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
2065 	if (kret != KERN_SUCCESS) {
2066 		kfree_type(struct arm_act_context, ic);
2067 		return (void *) 0;
2068 	}
2069 
2070 #if __ARM_VFP__
2071 	if (thread_is_64bit_data(thread)) {
2072 		val = ARM_NEON_STATE64_COUNT;
2073 		kret = machine_thread_get_state(thread,
2074 		    ARM_NEON_STATE64,
2075 		    (thread_state_t)&ic->ns,
2076 		    &val);
2077 	} else {
2078 		val = ARM_NEON_STATE_COUNT;
2079 		kret = machine_thread_get_state(thread,
2080 		    ARM_NEON_STATE,
2081 		    (thread_state_t)&ic->ns,
2082 		    &val);
2083 	}
2084 	if (kret != KERN_SUCCESS) {
2085 		kfree_type(struct arm_act_context, ic);
2086 		return (void *) 0;
2087 	}
2088 #endif
2089 	return ic;
2090 }
2091 
2092 /*
2093  * Routine: act_thread_catt
2094  *
2095  */
2096 void
act_thread_catt(void * ctx)2097 act_thread_catt(void * ctx)
2098 {
2099 	struct arm_act_context *ic;
2100 	kern_return_t   kret;
2101 	thread_t thread = current_thread();
2102 
2103 	ic = (struct arm_act_context *) ctx;
2104 	if (ic == (struct arm_act_context *) NULL) {
2105 		return;
2106 	}
2107 
2108 	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
2109 	if (kret != KERN_SUCCESS) {
2110 		goto out;
2111 	}
2112 
2113 #if __ARM_VFP__
2114 	if (thread_is_64bit_data(thread)) {
2115 		kret = machine_thread_set_state(thread,
2116 		    ARM_NEON_STATE64,
2117 		    (thread_state_t)&ic->ns,
2118 		    ARM_NEON_STATE64_COUNT);
2119 	} else {
2120 		kret = machine_thread_set_state(thread,
2121 		    ARM_NEON_STATE,
2122 		    (thread_state_t)&ic->ns,
2123 		    ARM_NEON_STATE_COUNT);
2124 	}
2125 	if (kret != KERN_SUCCESS) {
2126 		goto out;
2127 	}
2128 #endif
2129 out:
2130 	kfree_type(struct arm_act_context, ic);
2131 }
2132 
/*
 * Routine: act_thread_cfree
 *
 */
2137 void
act_thread_cfree(void * ctx)2138 act_thread_cfree(void *ctx)
2139 {
2140 	kfree_type(struct arm_act_context, ctx);
2141 }
2142 
2143 kern_return_t
thread_set_wq_state32(thread_t thread,thread_state_t tstate)2144 thread_set_wq_state32(thread_t       thread,
2145     thread_state_t tstate)
2146 {
2147 	arm_thread_state_t *state;
2148 	struct arm_saved_state *saved_state;
2149 	struct arm_saved_state32 *saved_state_32;
2150 	thread_t curth = current_thread();
2151 	spl_t s = 0;
2152 
2153 	assert(!thread_is_64bit_data(thread));
2154 
2155 	saved_state = thread->machine.upcb;
2156 	saved_state_32 = saved_state32(saved_state);
2157 
2158 	state = (arm_thread_state_t *)tstate;
2159 
2160 	if (curth != thread) {
2161 		s = splsched();
2162 		thread_lock(thread);
2163 	}
2164 
2165 	/*
2166 	 * do not zero saved_state, it can be concurrently accessed
2167 	 * and zero is not a valid state for some of the registers,
2168 	 * like sp.
2169 	 */
2170 	thread_state32_to_saved_state(state, saved_state);
2171 	saved_state_32->cpsr = PSR64_USER32_DEFAULT;
2172 
2173 	if (curth != thread) {
2174 		thread_unlock(thread);
2175 		splx(s);
2176 	}
2177 
2178 	return KERN_SUCCESS;
2179 }
2180 
2181 kern_return_t
thread_set_wq_state64(thread_t thread,thread_state_t tstate)2182 thread_set_wq_state64(thread_t       thread,
2183     thread_state_t tstate)
2184 {
2185 	arm_thread_state64_t *state;
2186 	struct arm_saved_state *saved_state;
2187 	struct arm_saved_state64 *saved_state_64;
2188 	thread_t curth = current_thread();
2189 	spl_t s = 0;
2190 
2191 	assert(thread_is_64bit_data(thread));
2192 
2193 	saved_state = thread->machine.upcb;
2194 	saved_state_64 = saved_state64(saved_state);
2195 	state = (arm_thread_state64_t *)tstate;
2196 
2197 	if (curth != thread) {
2198 		s = splsched();
2199 		thread_lock(thread);
2200 	}
2201 
2202 	/*
2203 	 * do not zero saved_state, it can be concurrently accessed
2204 	 * and zero is not a valid state for some of the registers,
2205 	 * like sp.
2206 	 */
2207 	thread_state64_to_saved_state(state, saved_state);
2208 	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);
2209 
2210 	if (curth != thread) {
2211 		thread_unlock(thread);
2212 		splx(s);
2213 	}
2214 
2215 	return KERN_SUCCESS;
2216 }
2217