xref: /xnu-10063.121.3/osfmk/arm64/status.c (revision 2c2f96dc2b9a4408a43d3150ae9c105355ca3daa)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/misc_protos.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/proc_reg.h>
39 #include <sys/random.h>
40 #if __has_feature(ptrauth_calls)
41 #include <ptrauth.h>
42 #endif
43 
44 #include <libkern/coreanalytics/coreanalytics.h>
45 
46 
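/*
 * Legacy VFPv2 layout: 32 32-bit registers followed by FPSCR, i.e. 33
 * 32-bit words (ARM_VFPV2_STATE_COUNT below).
 */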
47 struct arm_vfpv2_state {
48 	__uint32_t __r[32];
49 	__uint32_t __fpscr;
50 };
51 
52 typedef struct arm_vfpv2_state arm_vfpv2_state_t;
53 
54 #define ARM_VFPV2_STATE_COUNT \
55 	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
56 
57 /*
58  * Forward declarations
59  */
60 void thread_set_child(thread_t child, int pid);
61 static void free_debug_state(thread_t thread);
62 user_addr_t thread_get_sigreturn_token(thread_t thread);
63 uint32_t thread_get_sigreturn_diversifier(thread_t thread);
64 
65 /*
66  * Maps state flavor to number of words in the state:
67  */
68 /* __private_extern__ */
69 unsigned int _MachineStateCount[] = {
70 	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
71 	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
72 	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
73 	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
74 	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
75 	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
76 	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
77 	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
78 	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
79 	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
80 	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
81 	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
82 };
83 
84 extern zone_t ads_zone;
85 
86 #if __arm64__
87 /*
88  * Copy values from saved_state to ts64.
89  */
90 void
91 saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
92     arm_thread_state64_t *    ts64)
93 {
94 	uint32_t i;
95 
96 	assert(is_saved_state64(saved_state));
97 
98 	ts64->fp = get_saved_state_fp(saved_state);
99 	ts64->lr = get_saved_state_lr(saved_state);
100 	ts64->sp = get_saved_state_sp(saved_state);
101 	ts64->pc = get_saved_state_pc(saved_state);
102 	ts64->cpsr = get_saved_state_cpsr(saved_state);
103 	for (i = 0; i < 29; i++) {
104 		ts64->x[i] = get_saved_state_reg(saved_state, i);
105 	}
106 }
107 
108 /*
109  * Copy values from ts64 to saved_state.
110  *
111  * For safety, CPSR is sanitized as follows:
112  *
113  * - ts64->cpsr.{N,Z,C,V} are copied as-is into saved_state->cpsr
114  * - ts64->cpsr.M is ignored, and saved_state->cpsr.M is reset to EL0
115  * - All other saved_state->cpsr bits are preserved as-is
116  */
117 void
118 thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
119     arm_saved_state_t *          saved_state)
120 {
121 	uint32_t i;
122 #if __has_feature(ptrauth_calls)
123 	uint64_t intr = ml_pac_safe_interrupts_disable();
124 #endif /* __has_feature(ptrauth_calls) */
125 
126 	assert(is_saved_state64(saved_state));
127 
128 	const uint32_t CPSR_COPY_MASK = PSR64_USER_MASK;
129 	const uint32_t CPSR_ZERO_MASK = PSR64_MODE_MASK;
130 	const uint32_t CPSR_PRESERVE_MASK = ~(CPSR_COPY_MASK | CPSR_ZERO_MASK);
131 #if __has_feature(ptrauth_calls)
132 	/* BEGIN IGNORE CODESTYLE */
133 	MANIPULATE_SIGNED_USER_THREAD_STATE(saved_state,
134 		"and	w2, w2, %w[preserve_mask]"	"\n"
135 		"mov	w6, %w[cpsr]"			"\n"
136 		"and	w6, w6, %w[copy_mask]"		"\n"
137 		"orr	w2, w2, w6"			"\n"
138 		"str	w2, [x0, %[SS64_CPSR]]"		"\n",
139 		[cpsr] "r"(ts64->cpsr),
140 		[preserve_mask] "i"(CPSR_PRESERVE_MASK),
141 		[copy_mask] "i"(CPSR_COPY_MASK)
142 	);
143 	/* END IGNORE CODESTYLE */
144 	/*
145 	 * Make the CPSR write visible first, since it's useful as a
146 	 * canary to detect thread-state corruption.
147 	 */
148 	__builtin_arm_dmb(DMB_ST);
149 #else
150 	uint32_t new_cpsr = get_saved_state_cpsr(saved_state);
151 	new_cpsr &= CPSR_PRESERVE_MASK;
152 	new_cpsr |= (ts64->cpsr & CPSR_COPY_MASK);
153 	set_user_saved_state_cpsr(saved_state, new_cpsr);
154 #endif /* __has_feature(ptrauth_calls) */
155 	set_saved_state_fp(saved_state, ts64->fp);
156 	set_user_saved_state_lr(saved_state, ts64->lr);
157 	set_saved_state_sp(saved_state, ts64->sp);
158 	set_user_saved_state_pc(saved_state, ts64->pc);
159 	for (i = 0; i < 29; i++) {
160 		set_user_saved_state_reg(saved_state, i, ts64->x[i]);
161 	}
162 
163 #if __has_feature(ptrauth_calls)
164 	ml_pac_safe_interrupts_restore(intr);
165 #endif /* __has_feature(ptrauth_calls) */
166 }
167 
168 #endif /* __arm64__ */
169 
170 static kern_return_t
171 handle_get_arm32_thread_state(thread_state_t            tstate,
172     mach_msg_type_number_t *  count,
173     const arm_saved_state_t * saved_state)
174 {
175 	if (*count < ARM_THREAD_STATE32_COUNT) {
176 		return KERN_INVALID_ARGUMENT;
177 	}
178 	if (!is_saved_state32(saved_state)) {
179 		return KERN_INVALID_ARGUMENT;
180 	}
181 
182 	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
183 	*count = ARM_THREAD_STATE32_COUNT;
184 	return KERN_SUCCESS;
185 }
186 
187 static kern_return_t
188 handle_get_arm64_thread_state(thread_state_t            tstate,
189     mach_msg_type_number_t *  count,
190     const arm_saved_state_t * saved_state)
191 {
192 	if (*count < ARM_THREAD_STATE64_COUNT) {
193 		return KERN_INVALID_ARGUMENT;
194 	}
195 	if (!is_saved_state64(saved_state)) {
196 		return KERN_INVALID_ARGUMENT;
197 	}
198 
199 	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
200 	*count = ARM_THREAD_STATE64_COUNT;
201 	return KERN_SUCCESS;
202 }
203 
204 
205 static kern_return_t
206 handle_get_arm_thread_state(thread_state_t            tstate,
207     mach_msg_type_number_t *  count,
208     const arm_saved_state_t * saved_state)
209 {
210 	/* In an arm64 world, this flavor can be used to retrieve the thread
211 	 * state of a 32-bit or 64-bit thread into a unified structure, but we
212 	 * need to support legacy clients who are only aware of 32-bit, so
213 	 * check the count to see what the client is expecting.
214 	 */
215 	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
216 		return handle_get_arm32_thread_state(tstate, count, saved_state);
217 	}
218 
219 	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
220 	bzero(unified_state, sizeof(*unified_state));
221 #if __arm64__
222 	if (is_saved_state64(saved_state)) {
223 		unified_state->ash.flavor = ARM_THREAD_STATE64;
224 		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
225 		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
226 	} else
227 #endif
228 	{
229 		unified_state->ash.flavor = ARM_THREAD_STATE32;
230 		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
231 		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
232 	}
233 	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
234 	return KERN_SUCCESS;
235 }
236 
237 
238 static kern_return_t
239 handle_set_arm32_thread_state(const thread_state_t   tstate,
240     mach_msg_type_number_t count,
241     arm_saved_state_t *    saved_state)
242 {
243 	if (count != ARM_THREAD_STATE32_COUNT) {
244 		return KERN_INVALID_ARGUMENT;
245 	}
246 
247 	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
248 	return KERN_SUCCESS;
249 }
250 
251 static kern_return_t
252 handle_set_arm64_thread_state(const thread_state_t   tstate,
253     mach_msg_type_number_t count,
254     arm_saved_state_t *    saved_state)
255 {
256 	if (count != ARM_THREAD_STATE64_COUNT) {
257 		return KERN_INVALID_ARGUMENT;
258 	}
259 
260 	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
261 	return KERN_SUCCESS;
262 }
263 
264 
265 static kern_return_t
266 handle_set_arm_thread_state(const thread_state_t   tstate,
267     mach_msg_type_number_t count,
268     arm_saved_state_t *    saved_state)
269 {
270 	/* In an arm64 world, this flavor can be used to set the thread state of a
271 	 * 32-bit or 64-bit thread from a unified structure, but we need to support
272 	 * legacy clients who are only aware of 32-bit, so check the count to see
273 	 * what the client is expecting.
274 	 */
275 	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
276 		if (!is_saved_state32(saved_state)) {
277 			return KERN_INVALID_ARGUMENT;
278 		}
279 		return handle_set_arm32_thread_state(tstate, count, saved_state);
280 	}
281 
282 	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
283 #if __arm64__
284 	if (is_thread_state64(unified_state)) {
285 		if (!is_saved_state64(saved_state)) {
286 			return KERN_INVALID_ARGUMENT;
287 		}
288 		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
289 	} else
290 #endif
291 	{
292 		if (!is_saved_state32(saved_state)) {
293 			return KERN_INVALID_ARGUMENT;
294 		}
295 		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
296 	}
297 
298 	return KERN_SUCCESS;
299 }
300 
301 
302 #if __has_feature(ptrauth_calls)
303 
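/*
 * Derive a 32-bit sigreturn token for a user pc/lr value: XOR the pointer
 * with the thread's sigreturn token value, sign the result with the
 * process-independent data key (diversified by the "nonce" string
 * discriminator and the thread's jop_pid), and keep the upper 32 bits,
 * i.e. the bits that carry the PAC signature.
 */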
304 static inline uint32_t
305 thread_generate_sigreturn_token(
306 	void *ptr,
307 	thread_t thread)
308 {
309 	user64_addr_t token = (user64_addr_t)ptr;
310 	token ^= (user64_addr_t)thread_get_sigreturn_token(thread);
311 	token = (user64_addr_t)pmap_sign_user_ptr((void*)token,
312 	    ptrauth_key_process_independent_data, ptrauth_string_discriminator("nonce"),
313 	    thread->machine.jop_pid);
314 	token >>= 32;
315 	return (uint32_t)token;
316 }
317 #endif //__has_feature(ptrauth_calls)
318 
319 /*
320  * Translate thread state arguments to userspace representation
321  */
322 
323 kern_return_t
324 machine_thread_state_convert_to_user(
325 	thread_t thread,
326 	thread_flavor_t flavor,
327 	thread_state_t tstate,
328 	mach_msg_type_number_t *count,
329 	thread_set_status_flags_t tssf_flags)
330 {
331 #if __has_feature(ptrauth_calls)
332 	arm_thread_state64_t *ts64;
333 	bool preserve_flags = !!(tssf_flags & TSSF_PRESERVE_FLAGS);
334 	bool stash_sigreturn_token = !!(tssf_flags & TSSF_STASH_SIGRETURN_TOKEN);
335 	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
336 	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);
337 	uint32_t old_flags;
338 	bool kernel_signed_pc = true;
339 	bool kernel_signed_lr = true;
340 	uint32_t userland_diversifier = 0;
341 
342 	switch (flavor) {
343 	case ARM_THREAD_STATE:
344 	{
345 		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;
346 
347 		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
348 			return KERN_SUCCESS;
349 		}
350 		ts64 = thread_state64(unified_state);
351 		break;
352 	}
353 	case ARM_THREAD_STATE64:
354 	{
355 		if (*count < ARM_THREAD_STATE64_COUNT) {
356 			return KERN_SUCCESS;
357 		}
358 		ts64 = (arm_thread_state64_t *)tstate;
359 		break;
360 	}
361 	default:
362 		return KERN_SUCCESS;
363 	}
364 
365 	// Note that kernel threads never have disable_user_jop set
366 	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
367 	    !thread_is_64bit_addr(current_thread()) ||
368 	    (thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) || !thread_is_64bit_addr(thread)
369 	    ) {
370 		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
371 		return KERN_SUCCESS;
372 	}
373 
374 	old_flags = ts64->flags;
375 	ts64->flags = 0;
376 	if (ts64->lr) {
377 		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
378 		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
379 		    ptrauth_key_return_address);
380 		if (ts64->lr != stripped_lr) {
381 			// Need to allow already-signed lr value to round-trip as is
382 			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
383 		}
384 		// Note that an IB-signed return address that happens to have a 0 signature value
385 		// will round-trip correctly even if IA-signed again below (and IA-authd later)
386 	}
387 
388 	if (arm_user_jop_disabled()) {
389 		return KERN_SUCCESS;
390 	}
391 
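	/*
	 * The userland diversifier is a small value carried in the opaque bits
	 * of ts64->flags; when a pointer is not kernel-signed, it is blended
	 * into the PAC discriminator below, tying userland re-signing to this
	 * particular thread state.  It is either preserved from the old flags,
	 * freshly randomized, or derived from the thread, depending on the
	 * tssf_flags passed by the caller.
	 */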
392 	if (preserve_flags) {
393 		assert(random_div == false);
394 		assert(thread_div == false);
395 
396 		/* Restore the diversifier and other opaque flags */
397 		ts64->flags |= (old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
398 		userland_diversifier = old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
399 		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC)) {
400 			kernel_signed_pc = false;
401 		}
402 		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR)) {
403 			kernel_signed_lr = false;
404 		}
405 	} else {
406 		/* Set a non-zero userland diversifier */
407 		if (random_div) {
408 			do {
409 				read_random(&userland_diversifier, sizeof(userland_diversifier));
410 				userland_diversifier &=
411 				    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
412 			} while (userland_diversifier == 0);
413 		} else if (thread_div) {
414 			userland_diversifier = thread_get_sigreturn_diversifier(thread) &
415 			    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
416 		}
417 		ts64->flags |= userland_diversifier;
418 	}
419 
420 	if (kernel_signed_pc) {
421 		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC;
422 	}
423 
424 	if (kernel_signed_lr) {
425 		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR;
426 	}
427 
428 
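	/*
	 * Sign the pointers for userspace: pc and lr with the
	 * process-independent code key (their discriminators are blended with
	 * the userland diversifier when the value is not kernel-signed), and
	 * sp and fp with the process-independent data key.
	 */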
429 	if (ts64->pc) {
430 		uint64_t discriminator = ptrauth_string_discriminator("pc");
431 		if (!kernel_signed_pc && userland_diversifier != 0) {
432 			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
433 			    ptrauth_string_discriminator("pc"));
434 		}
435 
436 		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
437 		    ptrauth_key_process_independent_code, discriminator,
438 		    thread->machine.jop_pid);
439 	}
440 	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
441 		uint64_t discriminator = ptrauth_string_discriminator("lr");
442 		if (!kernel_signed_lr && userland_diversifier != 0) {
443 			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
444 			    ptrauth_string_discriminator("lr"));
445 		}
446 
447 		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
448 		    ptrauth_key_process_independent_code, discriminator,
449 		    thread->machine.jop_pid);
450 	}
451 	if (ts64->sp) {
452 		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
453 		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
454 		    thread->machine.jop_pid);
455 	}
456 	if (ts64->fp) {
457 		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
458 		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
459 		    thread->machine.jop_pid);
460 	}
461 
462 	/* Stash the sigreturn token */
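	/*
	 * The 32-bit tokens let the sigreturn path verify that userspace did
	 * not tamper with the kernel-signed pc/lr before returning.
	 */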
463 	if (stash_sigreturn_token) {
464 		if (kernel_signed_pc) {
465 			uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
466 			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
467 			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK);
468 		}
469 
470 		if (kernel_signed_lr) {
471 			uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
472 			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
473 			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK);
474 		}
475 	}
476 
477 	return KERN_SUCCESS;
478 #else
479 	// No conversion to userspace representation on this platform
480 	(void)thread; (void)flavor; (void)tstate; (void)count; (void)tssf_flags;
481 	return KERN_SUCCESS;
482 #endif /* __has_feature(ptrauth_calls) */
483 }
484 
485 #if __has_feature(ptrauth_calls)
486 extern char *   proc_name_address(void *p);
487 
488 CA_EVENT(pac_thread_state_exception_event,
489     CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
490 
491 static void
492 machine_thread_state_check_pac_state(
493 	arm_thread_state64_t *ts64,
494 	arm_thread_state64_t *old_ts64)
495 {
496 	bool send_event = false;
497 	task_t task = current_task();
498 	void *proc = get_bsdtask_info(task);
499 	char *proc_name = (char *) "unknown";
500 
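	/*
	 * Report (via CoreAnalytics) when the caller hands back a thread state
	 * whose kernel-signed pc no longer matches the old value, or whose
	 * kernel-signed (and not IB-signed) lr was modified or changed signing
	 * convention.
	 */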
501 	if (((ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC) &&
502 	    ts64->pc != old_ts64->pc) || (!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
503 	    (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR) && (ts64->lr != old_ts64->lr ||
504 	    (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)))) {
505 		send_event = true;
506 	}
507 
508 	if (!send_event) {
509 		return;
510 	}
511 
512 	proc_name = proc_name_address(proc);
513 	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_exception_event);
514 	CA_EVENT_TYPE(pac_thread_state_exception_event) * pexc_event = ca_event->data;
515 	strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
516 	CA_EVENT_SEND(ca_event);
517 }
518 
519 CA_EVENT(pac_thread_state_sigreturn_event,
520     CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
521 
522 static bool
523 machine_thread_state_check_sigreturn_token(
524 	arm_thread_state64_t *ts64,
525 	thread_t thread)
526 {
527 	task_t task = current_task();
528 	void *proc = get_bsdtask_info(task);
529 	char *proc_name = (char *) "unknown";
530 	bool token_matched = true;
531 	bool kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
532 	bool kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);
533 
534 	if (kernel_signed_pc) {
535 		/* Compute the sigreturn token */
536 		uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
537 		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
538 		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK)) {
539 			token_matched = false;
540 		}
541 	}
542 
543 	if (kernel_signed_lr) {
544 		/* Compute the sigreturn token */
545 		uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
546 		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
547 		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK)) {
548 			token_matched = false;
549 		}
550 	}
551 
552 	if (token_matched) {
553 		return true;
554 	}
555 
556 	proc_name = proc_name_address(proc);
557 	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_sigreturn_event);
558 	CA_EVENT_TYPE(pac_thread_state_sigreturn_event) * psig_event = ca_event->data;
559 	strlcpy(psig_event->proc_name, proc_name, CA_PROCNAME_LEN);
560 	CA_EVENT_SEND(ca_event);
561 	return false;
562 }
563 
564 #endif
565 
566 /*
567  * Translate thread state arguments from userspace representation
568  */
569 
570 kern_return_t
571 machine_thread_state_convert_from_user(
572 	thread_t thread,
573 	thread_flavor_t flavor,
574 	thread_state_t tstate,
575 	mach_msg_type_number_t count,
576 	thread_state_t old_tstate,
577 	mach_msg_type_number_t old_count,
578 	thread_set_status_flags_t tssf_flags)
579 {
580 #if __has_feature(ptrauth_calls)
581 	arm_thread_state64_t *ts64;
582 	arm_thread_state64_t *old_ts64 = NULL;
583 	void *userland_diversifier = NULL;
584 	bool kernel_signed_pc;
585 	bool kernel_signed_lr;
586 	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
587 	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);
588 
589 	switch (flavor) {
590 	case ARM_THREAD_STATE:
591 	{
592 		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;
593 
594 		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
595 			return KERN_SUCCESS;
596 		}
597 		ts64 = thread_state64(unified_state);
598 
599 		arm_unified_thread_state_t *old_unified_state = (arm_unified_thread_state_t *)old_tstate;
600 		if (old_unified_state && old_count >= ARM_UNIFIED_THREAD_STATE_COUNT) {
601 			old_ts64 = thread_state64(old_unified_state);
602 		}
603 		break;
604 	}
605 	case ARM_THREAD_STATE64:
606 	{
607 		if (count != ARM_THREAD_STATE64_COUNT) {
608 			return KERN_SUCCESS;
609 		}
610 		ts64 = (arm_thread_state64_t *)tstate;
611 
612 		if (old_count == ARM_THREAD_STATE64_COUNT) {
613 			old_ts64 = (arm_thread_state64_t *)old_tstate;
614 		}
615 		break;
616 	}
617 	default:
618 		return KERN_SUCCESS;
619 	}
620 
621 	// Note that kernel threads never have disable_user_jop set
622 	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
623 	    !thread_is_64bit_addr(current_thread())) {
624 		if ((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
625 		    !thread_is_64bit_addr(thread)) {
626 			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
627 			return KERN_SUCCESS;
628 		}
629 		// A JOP-disabled process must not set thread state on a JOP-enabled process
630 		return KERN_PROTECTION_FAILURE;
631 	}
632 
633 	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
634 		if ((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
635 		    !thread_is_64bit_addr(thread)
636 		    ) {
637 			return KERN_SUCCESS;
638 		}
639 		// Disallow setting unsigned thread state on JOP-enabled processes.
640 		// Ignore the flag and treat the thread state arguments as signed;
641 		// ptrauth poisoning will cause the resulting thread state to be invalid.
642 		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
643 	}
644 
645 	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
646 		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
647 		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
648 		    ptrauth_key_return_address);
649 		if (ts64->lr == stripped_lr) {
650 			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
651 			// treat as IA-signed below (where auth failure may poison the value).
652 			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
653 		}
654 		// Note that an IB-signed return address that happens to have a 0 signature value
655 		// will also have been IA-signed (without this flag being set) and so will IA-auth
656 		// correctly below.
657 	}
658 
659 	if (arm_user_jop_disabled()) {
660 		return KERN_SUCCESS;
661 	}
662 
663 	kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
664 	kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);
665 	/*
666 	 * Replace pc/lr with the old state if the allow-only-
667 	 * user-pointers flag is passed and the pointers are
668 	 * marked kernel-signed.
669 	 */
670 	if ((tssf_flags & TSSF_CHECK_USER_FLAGS) &&
671 	    (kernel_signed_pc || kernel_signed_lr)) {
672 		if (old_ts64 && old_count == count) {
673 			/* Send a CA event if the thread state does not match */
674 			machine_thread_state_check_pac_state(ts64, old_ts64);
675 
676 			/* Check if user ptrs need to be replaced */
677 			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
678 			    kernel_signed_pc) {
679 				ts64->pc = old_ts64->pc;
680 			}
681 
682 			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
683 			    !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
684 			    kernel_signed_lr) {
685 				ts64->lr = old_ts64->lr;
686 				if (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
687 					ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
688 				} else {
689 					ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
690 				}
691 			}
692 		}
693 	}
694 
695 	/* Validate sigreturn token */
696 	if (tssf_flags & TSSF_CHECK_SIGRETURN_TOKEN) {
697 		bool token_matched = machine_thread_state_check_sigreturn_token(ts64, thread);
698 		if ((tssf_flags & TSSF_ALLOW_ONLY_MATCHING_TOKEN) && !token_matched) {
699 			return KERN_PROTECTION_FAILURE;
700 		}
701 	}
702 
703 	/* Get the userland diversifier */
704 	if (random_div && old_ts64 && old_count == count) {
705 		/* Get the random diversifier from the old thread state */
706 		userland_diversifier = (void *)(long)(old_ts64->flags &
707 		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
708 	} else if (thread_div) {
709 		userland_diversifier = (void *)(long)(thread_get_sigreturn_diversifier(thread) &
710 		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
711 	}
712 
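	/*
	 * Authenticate the userspace-signed pointers back to raw values with
	 * the matching keys and discriminators used in
	 * machine_thread_state_convert_to_user(); a failed authentication
	 * leaves a poisoned pointer, so a forged value shows up as an invalid
	 * pointer rather than a usable one.
	 */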
713 	if (ts64->pc) {
714 		uint64_t discriminator = ptrauth_string_discriminator("pc");
715 		if (!kernel_signed_pc && userland_diversifier != 0) {
716 			discriminator = ptrauth_blend_discriminator(userland_diversifier,
717 			    ptrauth_string_discriminator("pc"));
718 		}
719 		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
720 		    ptrauth_key_process_independent_code, discriminator,
721 		    thread->machine.jop_pid);
722 	}
723 	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
724 		uint64_t discriminator = ptrauth_string_discriminator("lr");
725 		if (!kernel_signed_lr && userland_diversifier != 0) {
726 			discriminator = ptrauth_blend_discriminator(userland_diversifier,
727 			    ptrauth_string_discriminator("lr"));
728 		}
729 		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
730 		    ptrauth_key_process_independent_code, discriminator,
731 		    thread->machine.jop_pid);
732 	}
733 	if (ts64->sp) {
734 		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
735 		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
736 		    thread->machine.jop_pid);
737 	}
738 	if (ts64->fp) {
739 		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
740 		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
741 		    thread->machine.jop_pid);
742 	}
743 
744 	return KERN_SUCCESS;
745 #else
746 	// No conversion from userspace representation on this platform
747 	(void)thread; (void)flavor; (void)tstate; (void)count;
748 	(void)old_tstate; (void)old_count; (void)tssf_flags;
749 	return KERN_SUCCESS;
750 #endif /* __has_feature(ptrauth_calls) */
751 }
752 
753 #if __has_feature(ptrauth_calls)
754 bool
755 machine_thread_state_is_debug_flavor(int flavor)
756 {
757 	if (flavor == ARM_DEBUG_STATE ||
758 	    flavor == ARM_DEBUG_STATE64 ||
759 	    flavor == ARM_DEBUG_STATE32) {
760 		return true;
761 	}
762 	return false;
763 }
764 #endif /* __has_feature(ptrauth_calls) */
765 
766 /*
767  * Translate signal context data pointer to userspace representation
768  */
769 
770 kern_return_t
771 machine_thread_siguctx_pointer_convert_to_user(
772 	thread_t thread,
773 	user_addr_t *uctxp)
774 {
775 #if __has_feature(ptrauth_calls)
776 	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
777 	    !thread_is_64bit_addr(current_thread())) {
778 		assert((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) || !thread_is_64bit_addr(thread));
779 		return KERN_SUCCESS;
780 	}
781 
782 	if (arm_user_jop_disabled()) {
783 		return KERN_SUCCESS;
784 	}
785 
786 	if (*uctxp) {
787 		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
788 		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
789 		    thread->machine.jop_pid);
790 	}
791 
792 	return KERN_SUCCESS;
793 #else
794 	// No conversion to userspace representation on this platform
795 	(void)thread; (void)uctxp;
796 	return KERN_SUCCESS;
797 #endif /* __has_feature(ptrauth_calls) */
798 }
799 
800 /*
801  * Translate array of function pointer syscall arguments from userspace representation
802  */
803 
804 kern_return_t
805 machine_thread_function_pointers_convert_from_user(
806 	thread_t thread,
807 	user_addr_t *fptrs,
808 	uint32_t count)
809 {
810 #if __has_feature(ptrauth_calls)
811 	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
812 	    !thread_is_64bit_addr(current_thread())) {
813 		assert((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
814 		    !thread_is_64bit_addr(thread));
815 		return KERN_SUCCESS;
816 	}
817 
818 	if (arm_user_jop_disabled()) {
819 		return KERN_SUCCESS;
820 	}
821 
822 	while (count--) {
823 		if (*fptrs) {
824 			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
825 			    ptrauth_key_function_pointer, 0, thread->machine.jop_pid);
826 		}
827 		fptrs++;
828 	}
829 
830 	return KERN_SUCCESS;
831 #else
832 	// No conversion from userspace representation on this platform
833 	(void)thread; (void)fptrs; (void)count;
834 	return KERN_SUCCESS;
835 #endif /* __has_feature(ptrauth_calls) */
836 }
837 
838 /*
839  * Routine: machine_thread_get_state
840  *
841  */
842 kern_return_t
843 machine_thread_get_state(thread_t                 thread,
844     thread_flavor_t          flavor,
845     thread_state_t           tstate,
846     mach_msg_type_number_t * count)
847 {
848 	switch (flavor) {
849 	case THREAD_STATE_FLAVOR_LIST:
850 		if (*count < 4) {
851 			return KERN_INVALID_ARGUMENT;
852 		}
853 
854 		tstate[0] = ARM_THREAD_STATE;
855 		tstate[1] = ARM_VFP_STATE;
856 		tstate[2] = ARM_EXCEPTION_STATE;
857 		tstate[3] = ARM_DEBUG_STATE;
858 		*count = 4;
859 		break;
860 
861 	case THREAD_STATE_FLAVOR_LIST_NEW:
862 		if (*count < 4) {
863 			return KERN_INVALID_ARGUMENT;
864 		}
865 
866 		tstate[0] = ARM_THREAD_STATE;
867 		tstate[1] = ARM_VFP_STATE;
868 		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
869 		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
870 		*count = 4;
871 		break;
872 
873 	case THREAD_STATE_FLAVOR_LIST_10_15:
874 		if (*count < 5) {
875 			return KERN_INVALID_ARGUMENT;
876 		}
877 
878 		tstate[0] = ARM_THREAD_STATE;
879 		tstate[1] = ARM_VFP_STATE;
880 		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
881 		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
882 		tstate[4] = ARM_PAGEIN_STATE;
883 		*count = 5;
884 		break;
885 
886 	case ARM_THREAD_STATE:
887 	{
888 		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
889 		if (rn) {
890 			return rn;
891 		}
892 		break;
893 	}
894 	case ARM_THREAD_STATE32:
895 	{
896 		if (thread_is_64bit_data(thread)) {
897 			return KERN_INVALID_ARGUMENT;
898 		}
899 
900 		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
901 		if (rn) {
902 			return rn;
903 		}
904 		break;
905 	}
906 #if __arm64__
907 	case ARM_THREAD_STATE64:
908 	{
909 		if (!thread_is_64bit_data(thread)) {
910 			return KERN_INVALID_ARGUMENT;
911 		}
912 
913 		const arm_saved_state_t *current_state = thread->machine.upcb;
914 
915 		kern_return_t rn = handle_get_arm64_thread_state(tstate, count,
916 		    current_state);
917 		if (rn) {
918 			return rn;
919 		}
920 
921 		break;
922 	}
923 #endif
924 	case ARM_EXCEPTION_STATE:{
925 		struct arm_exception_state *state;
926 		struct arm_saved_state32 *saved_state;
927 
928 		if (*count < ARM_EXCEPTION_STATE_COUNT) {
929 			return KERN_INVALID_ARGUMENT;
930 		}
931 		if (thread_is_64bit_data(thread)) {
932 			return KERN_INVALID_ARGUMENT;
933 		}
934 
935 		state = (struct arm_exception_state *) tstate;
936 		saved_state = saved_state32(thread->machine.upcb);
937 
938 		state->exception = saved_state->exception;
939 		state->fsr = saved_state->esr;
940 		state->far = saved_state->far;
941 
942 		*count = ARM_EXCEPTION_STATE_COUNT;
943 		break;
944 	}
945 	case ARM_EXCEPTION_STATE64:{
946 		struct arm_exception_state64 *state;
947 		struct arm_saved_state64 *saved_state;
948 
949 		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
950 			return KERN_INVALID_ARGUMENT;
951 		}
952 		if (!thread_is_64bit_data(thread)) {
953 			return KERN_INVALID_ARGUMENT;
954 		}
955 
956 		state = (struct arm_exception_state64 *) tstate;
957 		saved_state = saved_state64(thread->machine.upcb);
958 
959 		state->exception = saved_state->exception;
960 		state->far = saved_state->far;
961 		state->esr = saved_state->esr;
962 
963 		*count = ARM_EXCEPTION_STATE64_COUNT;
964 		break;
965 	}
966 	case ARM_DEBUG_STATE:{
967 		arm_legacy_debug_state_t *state;
968 		arm_debug_state32_t *thread_state;
969 
970 		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
971 			return KERN_INVALID_ARGUMENT;
972 		}
973 
974 		if (thread_is_64bit_data(thread)) {
975 			return KERN_INVALID_ARGUMENT;
976 		}
977 
978 		state = (arm_legacy_debug_state_t *) tstate;
979 		thread_state = find_debug_state32(thread);
980 
981 		if (thread_state == NULL) {
982 			bzero(state, sizeof(arm_legacy_debug_state_t));
983 		} else {
984 			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
985 		}
986 
987 		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
988 		break;
989 	}
990 	case ARM_DEBUG_STATE32:{
991 		arm_debug_state32_t *state;
992 		arm_debug_state32_t *thread_state;
993 
994 		if (*count < ARM_DEBUG_STATE32_COUNT) {
995 			return KERN_INVALID_ARGUMENT;
996 		}
997 
998 		if (thread_is_64bit_data(thread)) {
999 			return KERN_INVALID_ARGUMENT;
1000 		}
1001 
1002 		state = (arm_debug_state32_t *) tstate;
1003 		thread_state = find_debug_state32(thread);
1004 
1005 		if (thread_state == NULL) {
1006 			bzero(state, sizeof(arm_debug_state32_t));
1007 		} else {
1008 			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
1009 		}
1010 
1011 		*count = ARM_DEBUG_STATE32_COUNT;
1012 		break;
1013 	}
1014 
1015 	case ARM_DEBUG_STATE64:{
1016 		arm_debug_state64_t *state;
1017 		arm_debug_state64_t *thread_state;
1018 
1019 		if (*count < ARM_DEBUG_STATE64_COUNT) {
1020 			return KERN_INVALID_ARGUMENT;
1021 		}
1022 
1023 		if (!thread_is_64bit_data(thread)) {
1024 			return KERN_INVALID_ARGUMENT;
1025 		}
1026 
1027 		state = (arm_debug_state64_t *) tstate;
1028 		thread_state = find_debug_state64(thread);
1029 
1030 		if (thread_state == NULL) {
1031 			bzero(state, sizeof(arm_debug_state64_t));
1032 		} else {
1033 			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
1034 		}
1035 
1036 		*count = ARM_DEBUG_STATE64_COUNT;
1037 		break;
1038 	}
1039 
1040 	case ARM_VFP_STATE:{
1041 		struct arm_vfp_state *state;
1042 		arm_neon_saved_state32_t *thread_state;
1043 		unsigned int max;
1044 
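		/*
		 * Callers passing less than the full ARM_VFP_STATE_COUNT fall
		 * back to the legacy VFPv2 size and get only the first 32
		 * registers plus FPSCR; otherwise the full 64-word VFP state
		 * is copied out.
		 */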
1045 		if (*count < ARM_VFP_STATE_COUNT) {
1046 			if (*count < ARM_VFPV2_STATE_COUNT) {
1047 				return KERN_INVALID_ARGUMENT;
1048 			} else {
1049 				*count =  ARM_VFPV2_STATE_COUNT;
1050 			}
1051 		}
1052 
1053 		if (*count == ARM_VFPV2_STATE_COUNT) {
1054 			max = 32;
1055 		} else {
1056 			max = 64;
1057 		}
1058 
1059 		state = (struct arm_vfp_state *) tstate;
1060 		thread_state = neon_state32(thread->machine.uNeon);
1061 		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
1062 
1063 		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
1064 		*count = (max + 1);
1065 		break;
1066 	}
1067 	case ARM_NEON_STATE:{
1068 		arm_neon_state_t *state;
1069 		arm_neon_saved_state32_t *thread_state;
1070 
1071 		if (*count < ARM_NEON_STATE_COUNT) {
1072 			return KERN_INVALID_ARGUMENT;
1073 		}
1074 
1075 		if (thread_is_64bit_data(thread)) {
1076 			return KERN_INVALID_ARGUMENT;
1077 		}
1078 
1079 		state = (arm_neon_state_t *)tstate;
1080 		thread_state = neon_state32(thread->machine.uNeon);
1081 
1082 		assert(sizeof(*thread_state) == sizeof(*state));
1083 		bcopy(thread_state, state, sizeof(arm_neon_state_t));
1084 
1085 		*count = ARM_NEON_STATE_COUNT;
1086 		break;
1087 	}
1088 
1089 	case ARM_NEON_STATE64:{
1090 		arm_neon_state64_t *state;
1091 		arm_neon_saved_state64_t *thread_state;
1092 
1093 		if (*count < ARM_NEON_STATE64_COUNT) {
1094 			return KERN_INVALID_ARGUMENT;
1095 		}
1096 
1097 		if (!thread_is_64bit_data(thread)) {
1098 			return KERN_INVALID_ARGUMENT;
1099 		}
1100 
1101 		state = (arm_neon_state64_t *)tstate;
1102 		thread_state = neon_state64(thread->machine.uNeon);
1103 
1104 		/* For now, these are identical */
1105 		assert(sizeof(*state) == sizeof(*thread_state));
1106 		bcopy(thread_state, state, sizeof(arm_neon_state64_t));
1107 
1108 
1109 		*count = ARM_NEON_STATE64_COUNT;
1110 		break;
1111 	}
1112 
1113 
1114 	case ARM_PAGEIN_STATE: {
1115 		arm_pagein_state_t *state;
1116 
1117 		if (*count < ARM_PAGEIN_STATE_COUNT) {
1118 			return KERN_INVALID_ARGUMENT;
1119 		}
1120 
1121 		state = (arm_pagein_state_t *)tstate;
1122 		state->__pagein_error = thread->t_pagein_error;
1123 
1124 		*count = ARM_PAGEIN_STATE_COUNT;
1125 		break;
1126 	}
1127 
1128 
1129 	default:
1130 		return KERN_INVALID_ARGUMENT;
1131 	}
1132 	return KERN_SUCCESS;
1133 }
1134 
1135 
1136 /*
1137  * Routine: machine_thread_get_kern_state
1138  *
1139  */
1140 kern_return_t
1141 machine_thread_get_kern_state(thread_t                 thread,
1142     thread_flavor_t          flavor,
1143     thread_state_t           tstate,
1144     mach_msg_type_number_t * count)
1145 {
1146 	/*
1147 	 * This works only for an interrupted kernel thread
1148 	 */
1149 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1150 		return KERN_FAILURE;
1151 	}
1152 
1153 	switch (flavor) {
1154 	case ARM_THREAD_STATE:
1155 	{
1156 		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1157 		if (rn) {
1158 			return rn;
1159 		}
1160 		break;
1161 	}
1162 	case ARM_THREAD_STATE32:
1163 	{
1164 		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1165 		if (rn) {
1166 			return rn;
1167 		}
1168 		break;
1169 	}
1170 #if __arm64__
1171 	case ARM_THREAD_STATE64:
1172 	{
1173 		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1174 		if (rn) {
1175 			return rn;
1176 		}
1177 		break;
1178 	}
1179 #endif
1180 	default:
1181 		return KERN_INVALID_ARGUMENT;
1182 	}
1183 	return KERN_SUCCESS;
1184 }
1185 
1186 void
1187 machine_thread_switch_addrmode(thread_t thread)
1188 {
1189 	if (task_has_64Bit_data(get_threadtask(thread))) {
1190 		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
1191 		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
1192 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1193 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1194 
1195 		/*
1196 		 * Reinitialize the NEON state.
1197 		 */
1198 		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1199 		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
1200 	} else {
1201 		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
1202 		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
1203 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1204 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1205 
1206 		/*
1207 		 * Reinitialize the NEON state.
1208 		 */
1209 		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1210 		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
1211 	}
1212 }
1213 
1214 extern long long arm_debug_get(void);
1215 
1216 /*
1217  * Routine: machine_thread_set_state
1218  *
1219  */
1220 kern_return_t
1221 machine_thread_set_state(thread_t               thread,
1222     thread_flavor_t        flavor,
1223     thread_state_t         tstate,
1224     mach_msg_type_number_t count)
1225 {
1226 	kern_return_t rn;
1227 
1228 	switch (flavor) {
1229 	case ARM_THREAD_STATE:
1230 		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
1231 		if (rn) {
1232 			return rn;
1233 		}
1234 		break;
1235 
1236 	case ARM_THREAD_STATE32:
1237 		if (thread_is_64bit_data(thread)) {
1238 			return KERN_INVALID_ARGUMENT;
1239 		}
1240 
1241 		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
1242 		if (rn) {
1243 			return rn;
1244 		}
1245 		break;
1246 
1247 #if __arm64__
1248 	case ARM_THREAD_STATE64:
1249 		if (!thread_is_64bit_data(thread)) {
1250 			return KERN_INVALID_ARGUMENT;
1251 		}
1252 
1253 
1254 		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
1255 		if (rn) {
1256 			return rn;
1257 		}
1258 		break;
1259 #endif
1260 	case ARM_EXCEPTION_STATE:{
1261 		if (count != ARM_EXCEPTION_STATE_COUNT) {
1262 			return KERN_INVALID_ARGUMENT;
1263 		}
1264 		if (thread_is_64bit_data(thread)) {
1265 			return KERN_INVALID_ARGUMENT;
1266 		}
1267 
1268 		break;
1269 	}
1270 	case ARM_EXCEPTION_STATE64:{
1271 		if (count != ARM_EXCEPTION_STATE64_COUNT) {
1272 			return KERN_INVALID_ARGUMENT;
1273 		}
1274 		if (!thread_is_64bit_data(thread)) {
1275 			return KERN_INVALID_ARGUMENT;
1276 		}
1277 
1278 		break;
1279 	}
1280 	case ARM_DEBUG_STATE:
1281 	{
1282 		arm_legacy_debug_state_t *state;
1283 		boolean_t enabled = FALSE;
1284 		unsigned int    i;
1285 
1286 		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
1287 			return KERN_INVALID_ARGUMENT;
1288 		}
1289 		if (thread_is_64bit_data(thread)) {
1290 			return KERN_INVALID_ARGUMENT;
1291 		}
1292 
1293 		state = (arm_legacy_debug_state_t *) tstate;
1294 
1295 		for (i = 0; i < 16; i++) {
1296 			/* do not allow context IDs to be set */
1297 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1298 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1299 			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1300 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1301 				return KERN_PROTECTION_FAILURE;
1302 			}
1303 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1304 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1305 				enabled = TRUE;
1306 			}
1307 		}
1308 
1309 		if (!enabled) {
1310 			free_debug_state(thread);
1311 		} else {
1312 			arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);
1313 
1314 			if (thread_state == NULL) {
1315 				return KERN_FAILURE;
1316 			}
1317 
1318 			for (i = 0; i < 16; i++) {
1319 				/* set appropriate privilege; mask out unknown bits */
1320 				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1321 				    | ARM_DBGBCR_MATCH_MASK
1322 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1323 				    | ARM_DBG_CR_ENABLE_MASK))
1324 				    | ARM_DBGBCR_TYPE_IVA
1325 				    | ARM_DBG_CR_LINKED_UNLINKED
1326 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1327 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1328 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1329 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1330 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1331 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
1332 				    | ARM_DBG_CR_ENABLE_MASK))
1333 				    | ARM_DBG_CR_LINKED_UNLINKED
1334 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1335 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1336 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1337 			}
1338 
1339 			thread_state->mdscr_el1 = 0ULL;         // Legacy customers issuing ARM_DEBUG_STATE don't drive single stepping.
1340 		}
1341 
1342 		if (thread == current_thread()) {
1343 			arm_debug_set32(thread->machine.DebugData);
1344 		}
1345 
1346 		break;
1347 	}
1348 	case ARM_DEBUG_STATE32:
1349 		/* ARM64_TODO  subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
1350 	{
1351 		arm_debug_state32_t *state;
1352 		boolean_t enabled = FALSE;
1353 		unsigned int    i;
1354 
1355 		if (count != ARM_DEBUG_STATE32_COUNT) {
1356 			return KERN_INVALID_ARGUMENT;
1357 		}
1358 		if (thread_is_64bit_data(thread)) {
1359 			return KERN_INVALID_ARGUMENT;
1360 		}
1361 
1362 		state = (arm_debug_state32_t *) tstate;
1363 
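		/*
		 * MDSCR_EL1.SS requests hardware single-step; treat it as
		 * "debug state in use" even when no breakpoints or watchpoints
		 * are enabled.
		 */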
1364 		if (state->mdscr_el1 & MDSCR_SS) {
1365 			enabled = TRUE;
1366 		}
1367 
1368 		for (i = 0; i < 16; i++) {
1369 			/* do not allow context IDs to be set */
1370 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1371 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1372 			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1373 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1374 				return KERN_PROTECTION_FAILURE;
1375 			}
1376 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1377 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1378 				enabled = TRUE;
1379 			}
1380 		}
1381 
1382 		if (!enabled) {
1383 			free_debug_state(thread);
1384 		} else {
1385 			arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);
1386 
1387 			if (thread_state == NULL) {
1388 				return KERN_FAILURE;
1389 			}
1390 
1391 			if (state->mdscr_el1 & MDSCR_SS) {
1392 				thread_state->mdscr_el1 |= MDSCR_SS;
1393 			} else {
1394 				thread_state->mdscr_el1 &= ~MDSCR_SS;
1395 			}
1396 
1397 			for (i = 0; i < 16; i++) {
1398 				/* set appropriate privilege; mask out unknown bits */
1399 				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1400 				    | ARM_DBGBCR_MATCH_MASK
1401 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1402 				    | ARM_DBG_CR_ENABLE_MASK))
1403 				    | ARM_DBGBCR_TYPE_IVA
1404 				    | ARM_DBG_CR_LINKED_UNLINKED
1405 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1406 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1407 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1408 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1409 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1410 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
1411 				    | ARM_DBG_CR_ENABLE_MASK))
1412 				    | ARM_DBG_CR_LINKED_UNLINKED
1413 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1414 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1415 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1416 			}
1417 		}
1418 
1419 		if (thread == current_thread()) {
1420 			arm_debug_set32(thread->machine.DebugData);
1421 		}
1422 
1423 		break;
1424 	}
1425 
1426 	case ARM_DEBUG_STATE64:
1427 	{
1428 		arm_debug_state64_t *state;
1429 		boolean_t enabled = FALSE;
1430 		unsigned int i;
1431 
1432 		if (count != ARM_DEBUG_STATE64_COUNT) {
1433 			return KERN_INVALID_ARGUMENT;
1434 		}
1435 		if (!thread_is_64bit_data(thread)) {
1436 			return KERN_INVALID_ARGUMENT;
1437 		}
1438 
1439 		state = (arm_debug_state64_t *) tstate;
1440 
1441 		if (state->mdscr_el1 & MDSCR_SS) {
1442 			enabled = TRUE;
1443 		}
1444 
1445 		for (i = 0; i < 16; i++) {
1446 			/* do not allow context IDs to be set */
1447 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1448 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1449 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1450 				return KERN_PROTECTION_FAILURE;
1451 			}
1452 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1453 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1454 				enabled = TRUE;
1455 			}
1456 		}
1457 
1458 		if (!enabled) {
1459 			free_debug_state(thread);
1460 		} else {
1461 			arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);
1462 
1463 			if (thread_state == NULL) {
1464 				return KERN_FAILURE;
1465 			}
1466 
1467 			if (state->mdscr_el1 & MDSCR_SS) {
1468 				thread_state->mdscr_el1 |= MDSCR_SS;
1469 			} else {
1470 				thread_state->mdscr_el1 &= ~MDSCR_SS;
1471 			}
1472 
1473 			for (i = 0; i < 16; i++) {
1474 				/* set appropriate privilege; mask out unknown bits */
1475 				thread_state->bcr[i] = (state->bcr[i] & (0         /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
1476 				    | 0                             /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
1477 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1478 				    | ARM_DBG_CR_ENABLE_MASK))
1479 				    | ARM_DBGBCR_TYPE_IVA
1480 				    | ARM_DBG_CR_LINKED_UNLINKED
1481 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1482 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1483 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1484 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1485 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1486 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
1487 				    | ARM_DBG_CR_ENABLE_MASK))
1488 				    | ARM_DBG_CR_LINKED_UNLINKED
1489 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1490 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1491 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1492 			}
1493 		}
1494 
1495 		if (thread == current_thread()) {
1496 			arm_debug_set64(thread->machine.DebugData);
1497 		}
1498 
1499 		break;
1500 	}
1501 
1502 	case ARM_VFP_STATE:{
1503 		struct arm_vfp_state *state;
1504 		arm_neon_saved_state32_t *thread_state;
1505 		unsigned int    max;
1506 
1507 		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
1508 			return KERN_INVALID_ARGUMENT;
1509 		}
1510 
1511 		if (count == ARM_VFPV2_STATE_COUNT) {
1512 			max = 32;
1513 		} else {
1514 			max = 64;
1515 		}
1516 
1517 		state = (struct arm_vfp_state *) tstate;
1518 		thread_state = neon_state32(thread->machine.uNeon);
1519 		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */
1520 
1521 		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));
1522 
1523 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1524 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1525 		break;
1526 	}
1527 
1528 	case ARM_NEON_STATE:{
1529 		arm_neon_state_t *state;
1530 		arm_neon_saved_state32_t *thread_state;
1531 
1532 		if (count != ARM_NEON_STATE_COUNT) {
1533 			return KERN_INVALID_ARGUMENT;
1534 		}
1535 
1536 		if (thread_is_64bit_data(thread)) {
1537 			return KERN_INVALID_ARGUMENT;
1538 		}
1539 
1540 		state = (arm_neon_state_t *)tstate;
1541 		thread_state = neon_state32(thread->machine.uNeon);
1542 
1543 		assert(sizeof(*state) == sizeof(*thread_state));
1544 		bcopy(state, thread_state, sizeof(arm_neon_state_t));
1545 
1546 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1547 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1548 		break;
1549 	}
1550 
1551 	case ARM_NEON_STATE64:{
1552 		arm_neon_state64_t *state;
1553 		arm_neon_saved_state64_t *thread_state;
1554 
1555 		if (count != ARM_NEON_STATE64_COUNT) {
1556 			return KERN_INVALID_ARGUMENT;
1557 		}
1558 
1559 		if (!thread_is_64bit_data(thread)) {
1560 			return KERN_INVALID_ARGUMENT;
1561 		}
1562 
1563 		state = (arm_neon_state64_t *)tstate;
1564 		thread_state = neon_state64(thread->machine.uNeon);
1565 
1566 		assert(sizeof(*state) == sizeof(*thread_state));
1567 		bcopy(state, thread_state, sizeof(arm_neon_state64_t));
1568 
1569 
1570 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1571 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1572 		break;
1573 	}
1574 
1575 
1576 	default:
1577 		return KERN_INVALID_ARGUMENT;
1578 	}
1579 	return KERN_SUCCESS;
1580 }
1581 
1582 mach_vm_address_t
1583 machine_thread_pc(thread_t thread)
1584 {
1585 	struct arm_saved_state *ss = get_user_regs(thread);
1586 	return (mach_vm_address_t)get_saved_state_pc(ss);
1587 }
1588 
1589 void
1590 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1591 {
1592 	set_user_saved_state_pc(get_user_regs(thread), (register_t)pc);
1593 }
1594 
1595 /*
1596  * Routine: machine_thread_state_initialize
1597  *
1598  */
1599 void
1600 machine_thread_state_initialize(thread_t thread)
1601 {
1602 	arm_context_t *context = thread->machine.contextData;
1603 
1604 	/*
1605 	 * Should always be set up later. For a kernel thread, we don't care
1606 	 * about this state. For a user thread, we'll set the state up in
1607 	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
1608 	 */
1609 
1610 	if (context != NULL) {
1611 		bzero(&context->ss.uss, sizeof(context->ss.uss));
1612 		bzero(&context->ns.uns, sizeof(context->ns.uns));
1613 
1614 		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
1615 			context->ns.ns_64.fpcr = FPCR_DEFAULT;
1616 		} else {
1617 			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
1618 		}
1619 		context->ss.ss_64.cpsr = PSR64_USER64_DEFAULT;
1620 	}
1621 
1622 	thread->machine.DebugData = NULL;
1623 
1624 #if defined(HAS_APPLE_PAC)
1625 	/* Sign the initial user-space thread state */
1626 	if (thread->machine.upcb != NULL) {
1627 		uint64_t intr = ml_pac_safe_interrupts_disable();
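		/*
		 * Call ml_sign_thread_state() with x0 = the saved state and the
		 * remaining register arguments zeroed except for the freshly
		 * initialized CPSR; lr is preserved in x6 across the call and
		 * SPSel is switched to SP_EL1 for its duration.
		 */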
1628 		asm volatile (
1629                         "mov	x0, %[iss]"             "\n"
1630                         "mov	x1, #0"                 "\n"
1631                         "mov	w2, %w[usr]"            "\n"
1632                         "mov	x3, #0"                 "\n"
1633                         "mov	x4, #0"                 "\n"
1634                         "mov	x5, #0"                 "\n"
1635                         "msr	SPSel, #1"              "\n"
1636                         VERIFY_USER_THREAD_STATE_INSTR  "\n"
1637                         "mov	x6, lr"                 "\n"
1638                         "bl     _ml_sign_thread_state"  "\n"
1639                         "msr	SPSel, #0"              "\n"
1640                         "mov	lr, x6"                 "\n"
1641                         :
1642                         : [iss] "r"(thread->machine.upcb), [usr] "r"(thread->machine.upcb->ss_64.cpsr),
1643                           VERIFY_USER_THREAD_STATE_INPUTS
1644                         : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
1645                 );
1646 		ml_pac_safe_interrupts_restore(intr);
1647 	}
1648 #endif /* defined(HAS_APPLE_PAC) */
1649 }
1650 
1651 /*
1652  * Routine: machine_thread_dup
1653  *
1654  */
1655 kern_return_t
1656 machine_thread_dup(thread_t self,
1657     thread_t target,
1658     __unused boolean_t is_corpse)
1659 {
1660 	struct arm_saved_state *self_saved_state;
1661 	struct arm_saved_state *target_saved_state;
1662 
1663 	target->machine.cthread_self = self->machine.cthread_self;
1664 
1665 	self_saved_state = self->machine.upcb;
1666 	target_saved_state = target->machine.upcb;
1667 	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
1668 #if defined(HAS_APPLE_PAC)
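	/*
	 * The thread-state signature is diversified by the saved-state
	 * area's address, so the copied state must be re-validated and
	 * re-signed for the target thread rather than inherited verbatim.
	 */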
1669 	if (!is_corpse && is_saved_state64(self_saved_state)) {
1670 		check_and_sign_copied_user_thread_state(target_saved_state, self_saved_state);
1671 	}
1672 #endif /* defined(HAS_APPLE_PAC) */
1673 
1674 	arm_neon_saved_state_t *self_neon_state = self->machine.uNeon;
1675 	arm_neon_saved_state_t *target_neon_state = target->machine.uNeon;
1676 	bcopy(self_neon_state, target_neon_state, sizeof(*target_neon_state));
1677 
1678 
1679 	return KERN_SUCCESS;
1680 }
1681 
1682 /*
1683  * Routine: get_user_regs
1684  *
1685  */
1686 struct arm_saved_state *
1687 get_user_regs(thread_t thread)
1688 {
1689 	return thread->machine.upcb;
1690 }
1691 
1692 arm_neon_saved_state_t *
1693 get_user_neon_regs(thread_t thread)
1694 {
1695 	return thread->machine.uNeon;
1696 }
1697 
1698 /*
1699  * Routine: find_user_regs
1700  *
1701  */
1702 struct arm_saved_state *
1703 find_user_regs(thread_t thread)
1704 {
1705 	return thread->machine.upcb;
1706 }
1707 
1708 /*
1709  * Routine: find_kern_regs
1710  *
1711  */
1712 struct arm_saved_state *
1713 find_kern_regs(thread_t thread)
1714 {
1715 	/*
1716 	 * This works only for an interrupted kernel thread
1717 	 */
1718 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1719 		return (struct arm_saved_state *) NULL;
1720 	} else {
1721 		return getCpuDatap()->cpu_int_state;
1722 	}
1723 }
1724 
1725 arm_debug_state32_t *
1726 find_debug_state32(thread_t thread)
1727 {
1728 	if (thread && thread->machine.DebugData) {
1729 		return &(thread->machine.DebugData->uds.ds32);
1730 	} else {
1731 		return NULL;
1732 	}
1733 }
1734 
1735 arm_debug_state64_t *
1736 find_debug_state64(thread_t thread)
1737 {
1738 	if (thread && thread->machine.DebugData) {
1739 		return &(thread->machine.DebugData->uds.ds64);
1740 	} else {
1741 		return NULL;
1742 	}
1743 }
1744 
1745 os_refgrp_decl(static, dbg_refgrp, "arm_debug_state", NULL);
1746 
1747 /**
1748  *  Finds the debug state for the given 64-bit thread, allocating one if it
1749  *  does not exist.
1750  *
1751  *  @param thread 64-bit thread to find or allocate debug state for
1752  *
1753  *  @returns A pointer to the given thread's 64-bit debug state or a null
1754  *           pointer if the given thread is null or the allocation of a new
1755  *           debug state fails.
1756  */
1757 arm_debug_state64_t *
1758 find_or_allocate_debug_state64(thread_t thread)
1759 {
1760 	arm_debug_state64_t *thread_state = find_debug_state64(thread);
1761 	if (thread != NULL && thread_state == NULL) {
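		/* Z_WAITOK | Z_NOFAIL: the allocation blocks as needed and cannot return NULL. */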
1762 		thread->machine.DebugData = zalloc_flags(ads_zone,
1763 		    Z_WAITOK | Z_NOFAIL);
1764 		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1765 		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1766 		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1767 		os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1768 		thread_state = find_debug_state64(thread);
1769 	}
1770 	return thread_state;
1771 }
1772 
1773 /**
1774  *  Finds the debug state for the given 32-bit thread, allocating one if it
1775  *  does not exist.
1776  *
1777  *  @param thread 32-bit thread to find or allocate debug state for
1778  *
1779  *  @returns A pointer to the given thread's 32-bit debug state or a null
1780  *           pointer if the given thread is null or the allocation of a new
1781  *           debug state fails.
1782  */
1783 arm_debug_state32_t *
1784 find_or_allocate_debug_state32(thread_t thread)
1785 {
1786 	arm_debug_state32_t *thread_state = find_debug_state32(thread);
1787 	if (thread != NULL && thread_state == NULL) {
1788 		thread->machine.DebugData = zalloc_flags(ads_zone,
1789 		    Z_WAITOK | Z_NOFAIL);
1790 		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1791 		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1792 		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1793 		os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1794 		thread_state = find_debug_state32(thread);
1795 	}
1796 	return thread_state;
1797 }
1798 
1799 /**
1800  *  Frees a thread's debug state if allocated. Otherwise does nothing.
1801  *
1802  *  @param thread thread to free the debug state of
1803  */
1804 static inline void
1805 free_debug_state(thread_t thread)
1806 {
1807 	if (thread != NULL && thread->machine.DebugData != NULL) {
1808 		arm_debug_state_t *pTmp = thread->machine.DebugData;
1809 		thread->machine.DebugData = NULL;
1810 
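		/* DebugData is reference counted; only free it once the last reference is dropped. */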
1811 		if (os_ref_release(&pTmp->ref) == 0) {
1812 			zfree(ads_zone, pTmp);
1813 		}
1814 	}
1815 }
1816 
1817 /*
1818  * Routine: thread_userstack
1819  *
1820  */
1821 kern_return_t
1822 thread_userstack(__unused thread_t  thread,
1823     int                flavor,
1824     thread_state_t     tstate,
1825     unsigned int       count,
1826     mach_vm_offset_t * user_stack,
1827     int *              customstack,
1828     boolean_t          is_64bit_data
1829     )
1830 {
1831 	register_t sp;
1832 
1833 	switch (flavor) {
1834 	case ARM_THREAD_STATE:
1835 		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1836 #if __arm64__
1837 			if (is_64bit_data) {
1838 				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1839 			} else
1840 #endif
1841 			{
1842 				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1843 			}
1844 
1845 			break;
1846 		}
1847 
1848 		/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1849 		OS_FALLTHROUGH;
1850 	case ARM_THREAD_STATE32:
1851 		if (count != ARM_THREAD_STATE32_COUNT) {
1852 			return KERN_INVALID_ARGUMENT;
1853 		}
1854 		if (is_64bit_data) {
1855 			return KERN_INVALID_ARGUMENT;
1856 		}
1857 
1858 		sp = ((arm_thread_state32_t *)tstate)->sp;
1859 		break;
1860 #if __arm64__
1861 	case ARM_THREAD_STATE64:
1862 		if (count != ARM_THREAD_STATE64_COUNT) {
1863 			return KERN_INVALID_ARGUMENT;
1864 		}
1865 		if (!is_64bit_data) {
1866 			return KERN_INVALID_ARGUMENT;
1867 		}
1868 
1869 		sp = ((arm_thread_state64_t *)tstate)->sp;
1870 		break;
1871 #endif
1872 	default:
1873 		return KERN_INVALID_ARGUMENT;
1874 	}
1875 
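	/*
	 * A non-zero sp in the provided state selects a custom stack;
	 * otherwise report the default user stack base.
	 */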
1876 	if (sp) {
1877 		*user_stack = CAST_USER_ADDR_T(sp);
1878 		if (customstack) {
1879 			*customstack = 1;
1880 		}
1881 	} else {
1882 		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
1883 		if (customstack) {
1884 			*customstack = 0;
1885 		}
1886 	}
1887 
1888 	return KERN_SUCCESS;
1889 }
1890 
1891 /*
1892  * thread_userstackdefault:
1893  *
1894  * Return the default stack location for the
1895  * thread, if otherwise unknown.
1896  */
1897 kern_return_t
1898 thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1899     boolean_t          is64bit)
1900 {
1901 	if (is64bit) {
1902 		*default_user_stack = USRSTACK64;
1903 	} else {
1904 		*default_user_stack = USRSTACK;
1905 	}
1906 
1907 	return KERN_SUCCESS;
1908 }
1909 
1910 /*
1911  * Routine: thread_setuserstack
1912  *
1913  */
1914 void
1915 thread_setuserstack(thread_t          thread,
1916     mach_vm_address_t user_stack)
1917 {
1918 	struct arm_saved_state *sv;
1919 
1920 	sv = get_user_regs(thread);
1921 
1922 	set_saved_state_sp(sv, user_stack);
1923 
1924 	return;
1925 }
1926 
1927 /*
1928  * Routine: thread_adjuserstack
1929  *
1930  */
1931 user_addr_t
1932 thread_adjuserstack(thread_t thread,
1933     int      adjust)
1934 {
1935 	struct arm_saved_state *sv;
1936 	uint64_t sp;
1937 
1938 	sv = get_user_regs(thread);
1939 
1940 	sp = get_saved_state_sp(sv);
1941 	sp += adjust;
1942 	set_saved_state_sp(sv, sp);
1943 
1944 	return sp;
1945 }
1946 
1947 
1948 /*
1949  * Routine: thread_setentrypoint
1950  *
1951  */
1952 void
1953 thread_setentrypoint(thread_t         thread,
1954     mach_vm_offset_t entry)
1955 {
1956 	struct arm_saved_state *sv;
1957 
1958 #if HAS_APPLE_PAC
1959 	uint64_t intr = ml_pac_safe_interrupts_disable();
1960 #endif
1961 
1962 	sv = get_user_regs(thread);
1963 
1964 	set_user_saved_state_pc(sv, entry);
1965 
1966 #if HAS_APPLE_PAC
1967 	ml_pac_safe_interrupts_restore(intr);
1968 #endif
1969 
1970 	return;
1971 }
1972 
1973 /*
1974  * Routine: thread_entrypoint
1975  *
1976  */
1977 kern_return_t
1978 thread_entrypoint(__unused thread_t  thread,
1979     int                flavor,
1980     thread_state_t     tstate,
1981     unsigned int       count,
1982     mach_vm_offset_t * entry_point
1983     )
1984 {
1985 	switch (flavor) {
1986 	case ARM_THREAD_STATE:
1987 	{
1988 		struct arm_thread_state *state;
1989 
1990 		if (count != ARM_THREAD_STATE_COUNT) {
1991 			return KERN_INVALID_ARGUMENT;
1992 		}
1993 
1994 		state = (struct arm_thread_state *) tstate;
1995 
1996 		/*
1997 		 * If a valid entry point is specified, use it.
1998 		 */
1999 		if (state->pc) {
2000 			*entry_point = CAST_USER_ADDR_T(state->pc);
2001 		} else {
2002 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
2003 		}
2004 	}
2005 	break;
2006 
2007 	case ARM_THREAD_STATE64:
2008 	{
2009 		struct arm_thread_state64 *state;
2010 
2011 		if (count != ARM_THREAD_STATE64_COUNT) {
2012 			return KERN_INVALID_ARGUMENT;
2013 		}
2014 
2015 		state = (struct arm_thread_state64*) tstate;
2016 
2017 		/*
2018 		 * If a valid entry point is specified, use it.
2019 		 */
2020 		if (state->pc) {
2021 			*entry_point = CAST_USER_ADDR_T(state->pc);
2022 		} else {
2023 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
2024 		}
2025 
2026 		break;
2027 	}
2028 	default:
2029 		return KERN_INVALID_ARGUMENT;
2030 	}
2031 
2032 	return KERN_SUCCESS;
2033 }
2034 
2035 
2036 /*
2037  * Routine: thread_set_child
2038  *
2039  */
2040 void
2041 thread_set_child(thread_t child,
2042     int      pid)
2043 {
2044 	struct arm_saved_state *child_state;
2045 
2046 	child_state = get_user_regs(child);
2047 
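	/*
	 * fork()-style return for the child: x0/r0 carries the pid and
	 * x1/r1 is set to 1 so user space can tell the child side from
	 * the parent side.
	 */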
2048 	set_user_saved_state_reg(child_state, 0, pid);
2049 	set_user_saved_state_reg(child_state, 1, 1ULL);
2050 }
2051 
2052 
2053 struct arm_act_context {
2054 	struct arm_unified_thread_state ss;
2055 #if __ARM_VFP__
2056 	struct arm_neon_saved_state ns;
2057 #endif
2058 };
2059 
2060 /*
2061  * Routine: act_thread_csave
2062  *
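 * Snapshot the current thread's general-purpose (and NEON) user state
 * into a heap-allocated context for a later act_thread_catt().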
2063  */
2064 void *
2065 act_thread_csave(void)
2066 {
2067 	struct arm_act_context *ic;
2068 	kern_return_t   kret;
2069 	unsigned int    val;
2070 	thread_t thread = current_thread();
2071 
2072 	ic = kalloc_type(struct arm_act_context, Z_WAITOK);
2073 	if (ic == (struct arm_act_context *) NULL) {
2074 		return (void *) 0;
2075 	}
2076 
2077 	val = ARM_UNIFIED_THREAD_STATE_COUNT;
2078 	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
2079 	if (kret != KERN_SUCCESS) {
2080 		kfree_type(struct arm_act_context, ic);
2081 		return (void *) 0;
2082 	}
2083 
2084 #if __ARM_VFP__
2085 	if (thread_is_64bit_data(thread)) {
2086 		val = ARM_NEON_STATE64_COUNT;
2087 		kret = machine_thread_get_state(thread,
2088 		    ARM_NEON_STATE64,
2089 		    (thread_state_t)&ic->ns,
2090 		    &val);
2091 	} else {
2092 		val = ARM_NEON_STATE_COUNT;
2093 		kret = machine_thread_get_state(thread,
2094 		    ARM_NEON_STATE,
2095 		    (thread_state_t)&ic->ns,
2096 		    &val);
2097 	}
2098 	if (kret != KERN_SUCCESS) {
2099 		kfree_type(struct arm_act_context, ic);
2100 		return (void *) 0;
2101 	}
2102 #endif
2103 	return ic;
2104 }
2105 
2106 /*
2107  * Routine: act_thread_catt
2108  *
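 * Apply a context captured by act_thread_csave() to the current thread,
 * then free it.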
2109  */
2110 void
2111 act_thread_catt(void * ctx)
2112 {
2113 	struct arm_act_context *ic;
2114 	kern_return_t   kret;
2115 	thread_t thread = current_thread();
2116 
2117 	ic = (struct arm_act_context *) ctx;
2118 	if (ic == (struct arm_act_context *) NULL) {
2119 		return;
2120 	}
2121 
2122 	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
2123 	if (kret != KERN_SUCCESS) {
2124 		goto out;
2125 	}
2126 
2127 #if __ARM_VFP__
2128 	if (thread_is_64bit_data(thread)) {
2129 		kret = machine_thread_set_state(thread,
2130 		    ARM_NEON_STATE64,
2131 		    (thread_state_t)&ic->ns,
2132 		    ARM_NEON_STATE64_COUNT);
2133 	} else {
2134 		kret = machine_thread_set_state(thread,
2135 		    ARM_NEON_STATE,
2136 		    (thread_state_t)&ic->ns,
2137 		    ARM_NEON_STATE_COUNT);
2138 	}
2139 	if (kret != KERN_SUCCESS) {
2140 		goto out;
2141 	}
2142 #endif
2143 out:
2144 	kfree_type(struct arm_act_context, ic);
2145 }
2146 
2147 /*
2148  * Routine: act_thread_cfree
2149  *
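 * Release a saved context without applying it.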
2150  */
2151 void
2152 act_thread_cfree(void *ctx)
2153 {
2154 	kfree_type(struct arm_act_context, ctx);
2155 }
2156 
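/*
 * Routine: thread_set_wq_state32
 *
 * Load a caller-provided 32-bit register state into the target thread,
 * as used when dispatching workqueue threads.
 */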
2157 kern_return_t
2158 thread_set_wq_state32(thread_t       thread,
2159     thread_state_t tstate)
2160 {
2161 	arm_thread_state_t *state;
2162 	struct arm_saved_state *saved_state;
2163 	struct arm_saved_state32 *saved_state_32;
2164 	thread_t curth = current_thread();
2165 	spl_t s = 0;
2166 
2167 	assert(!thread_is_64bit_data(thread));
2168 
2169 	saved_state = thread->machine.upcb;
2170 	saved_state_32 = saved_state32(saved_state);
2171 
2172 	state = (arm_thread_state_t *)tstate;
2173 
2174 	if (curth != thread) {
2175 		s = splsched();
2176 		thread_lock(thread);
2177 	}
2178 
2179 	/*
2180 	 * Do not zero saved_state: it can be accessed concurrently, and
2181 	 * zero is not a valid value for some of the registers (sp, for
2182 	 * example).
2183 	 */
2184 	thread_state32_to_saved_state(state, saved_state);
2185 	saved_state_32->cpsr = PSR64_USER32_DEFAULT;
2186 
2187 	if (curth != thread) {
2188 		thread_unlock(thread);
2189 		splx(s);
2190 	}
2191 
2192 	return KERN_SUCCESS;
2193 }
2194 
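/*
 * Routine: thread_set_wq_state64
 *
 * 64-bit counterpart of thread_set_wq_state32.
 */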
2195 kern_return_t
2196 thread_set_wq_state64(thread_t       thread,
2197     thread_state_t tstate)
2198 {
2199 	arm_thread_state64_t *state;
2200 	struct arm_saved_state *saved_state;
2201 	struct arm_saved_state64 *saved_state_64;
2202 	thread_t curth = current_thread();
2203 	spl_t s = 0;
2204 
2205 	assert(thread_is_64bit_data(thread));
2206 
2207 	saved_state = thread->machine.upcb;
2208 	saved_state_64 = saved_state64(saved_state);
2209 	state = (arm_thread_state64_t *)tstate;
2210 
2211 	if (curth != thread) {
2212 		s = splsched();
2213 		thread_lock(thread);
2214 	}
2215 
2216 	/*
2217 	 * Do not zero saved_state: it can be accessed concurrently, and
2218 	 * zero is not a valid value for some of the registers (sp, for
2219 	 * example).
2220 	 */
2221 	thread_state64_to_saved_state(state, saved_state);
2222 	set_user_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);
2223 
2224 	if (curth != thread) {
2225 		thread_unlock(thread);
2226 		splx(s);
2227 	}
2228 
2229 	return KERN_SUCCESS;
2230 }
2231