xref: /xnu-10002.1.13/osfmk/arm64/status.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/misc_protos.h>
37 #include <arm64/machine_machdep.h>
38 #include <arm64/proc_reg.h>
39 #include <sys/random.h>
40 #if __has_feature(ptrauth_calls)
41 #include <ptrauth.h>
42 #endif
43 
44 #include <libkern/coreanalytics/coreanalytics.h>
45 
46 
/*
 * Legacy VFPv2 register-file layout: 32 single-precision registers
 * plus the FPSCR status/control word.
 */
struct arm_vfpv2_state {
	__uint32_t __r[32];
	__uint32_t __fpscr;
};

typedef struct arm_vfpv2_state arm_vfpv2_state_t;

/* Size of an arm_vfpv2_state_t expressed in 32-bit words (Mach count convention). */
#define ARM_VFPV2_STATE_COUNT \
	((mach_msg_type_number_t)(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
56 
57 /*
58  * Forward definitions
59  */
60 void thread_set_child(thread_t child, int pid);
61 void thread_set_parent(thread_t parent, int pid);
62 static void free_debug_state(thread_t thread);
63 user_addr_t thread_get_sigreturn_token(thread_t thread);
64 uint32_t thread_get_sigreturn_diversifier(thread_t thread);
65 
66 /*
67  * Maps state flavor to number of words in the state:
68  */
/* __private_extern__ */
/*
 * _MachineStateCount[flavor]: size of each supported thread-state
 * flavor's structure, in 32-bit words.  Indexed directly by the
 * thread flavor constant.
 */
unsigned int _MachineStateCount[] = {
	[ARM_UNIFIED_THREAD_STATE] = ARM_UNIFIED_THREAD_STATE_COUNT,
	[ARM_VFP_STATE] = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE] = ARM_DEBUG_STATE_COUNT,
	[ARM_THREAD_STATE64] = ARM_THREAD_STATE64_COUNT,
	[ARM_EXCEPTION_STATE64] = ARM_EXCEPTION_STATE64_COUNT,
	[ARM_THREAD_STATE32] = ARM_THREAD_STATE32_COUNT,
	[ARM_DEBUG_STATE32] = ARM_DEBUG_STATE32_COUNT,
	[ARM_DEBUG_STATE64] = ARM_DEBUG_STATE64_COUNT,
	[ARM_NEON_STATE] = ARM_NEON_STATE_COUNT,
	[ARM_NEON_STATE64] = ARM_NEON_STATE64_COUNT,
	[ARM_PAGEIN_STATE] = ARM_PAGEIN_STATE_COUNT,
};
84 
85 extern zone_t ads_zone;
86 
87 #if __arm64__
88 /*
89  * Copy values from saved_state to ts64.
90  */
91 void
saved_state_to_thread_state64(const arm_saved_state_t * saved_state,arm_thread_state64_t * ts64)92 saved_state_to_thread_state64(const arm_saved_state_t * saved_state,
93     arm_thread_state64_t *    ts64)
94 {
95 	uint32_t i;
96 
97 	assert(is_saved_state64(saved_state));
98 
99 	ts64->fp = get_saved_state_fp(saved_state);
100 	ts64->lr = get_saved_state_lr(saved_state);
101 	ts64->sp = get_saved_state_sp(saved_state);
102 	ts64->pc = get_saved_state_pc(saved_state);
103 	ts64->cpsr = get_saved_state_cpsr(saved_state);
104 	for (i = 0; i < 29; i++) {
105 		ts64->x[i] = get_saved_state_reg(saved_state, i);
106 	}
107 }
108 
109 /*
110  * Copy values from ts64 to saved_state.
111  *
112  * For safety, CPSR is sanitized as follows:
113  *
114  * - ts64->cpsr.{N,Z,C,V} are copied as-is into saved_state->cpsr
115  * - ts64->cpsr.M is ignored, and saved_state->cpsr.M is reset to EL0
116  * - All other saved_state->cpsr bits are preserved as-is
117  */
/*
 * Import a userspace arm_thread_state64_t into the kernel saved state.
 * CPSR is sanitized per the contract documented above: only the
 * PSR64_USER_MASK bits are taken from ts64, the mode field is forced
 * to zero (EL0), and all remaining bits keep their current values.
 */
void
thread_state64_to_saved_state(const arm_thread_state64_t * ts64,
    arm_saved_state_t *          saved_state)
{
	uint32_t i;
#if __has_feature(ptrauth_calls)
	/*
	 * NOTE(review): ml_pac_safe_interrupts_disable() presumably masks
	 * interrupts so the signed thread state is never observed
	 * mid-update — confirm against its definition.
	 */
	uint64_t intr = ml_pac_safe_interrupts_disable();
#endif /* __has_feature(ptrauth_calls) */

	assert(is_saved_state64(saved_state));

	/*
	 * COPY: bits the caller controls (user-visible PSR bits).
	 * ZERO: the mode field, forced to zero so state stays EL0.
	 * PRESERVE: everything else keeps its existing value.
	 */
	const uint32_t CPSR_COPY_MASK = PSR64_USER_MASK;
	const uint32_t CPSR_ZERO_MASK = PSR64_MODE_MASK;
	const uint32_t CPSR_PRESERVE_MASK = ~(CPSR_COPY_MASK | CPSR_ZERO_MASK);
#if __has_feature(ptrauth_calls)
	/* BEGIN IGNORE CODESTYLE */
	/*
	 * Under ptrauth the CPSR merge is performed inside the
	 * MANIPULATE_SIGNED_THREAD_STATE re-signing sequence:
	 *   w2 = (w2 & PRESERVE) | (ts64->cpsr & COPY)
	 * and the result is stored at saved_state + SS64_CPSR.
	 */
	MANIPULATE_SIGNED_THREAD_STATE(saved_state,
		"and	w2, w2, %w[preserve_mask]"	"\n"
		"mov	w6, %w[cpsr]"			"\n"
		"and	w6, w6, %w[copy_mask]"		"\n"
		"orr	w2, w2, w6"			"\n"
		"str	w2, [x0, %[SS64_CPSR]]"		"\n",
		[cpsr] "r"(ts64->cpsr),
		[preserve_mask] "i"(CPSR_PRESERVE_MASK),
		[copy_mask] "i"(CPSR_COPY_MASK)
	);
	/* END IGNORE CODESTYLE */
	/*
	 * Make writes to ts64->cpsr visible first, since it's useful as a
	 * canary to detect thread-state corruption.
	 */
	__builtin_arm_dmb(DMB_ST);
#else
	/* Without ptrauth the same COPY/ZERO/PRESERVE merge is done in C. */
	uint32_t new_cpsr = get_saved_state_cpsr(saved_state);
	new_cpsr &= CPSR_PRESERVE_MASK;
	new_cpsr |= (ts64->cpsr & CPSR_COPY_MASK);
	set_saved_state_cpsr(saved_state, new_cpsr);
#endif /* __has_feature(ptrauth_calls) */
	set_saved_state_fp(saved_state, ts64->fp);
	set_saved_state_lr(saved_state, ts64->lr);
	set_saved_state_sp(saved_state, ts64->sp);
	set_saved_state_pc(saved_state, ts64->pc);
	for (i = 0; i < 29; i++) {
		set_saved_state_reg(saved_state, i, ts64->x[i]);
	}

#if __has_feature(ptrauth_calls)
	ml_pac_safe_interrupts_restore(intr);
#endif /* __has_feature(ptrauth_calls) */
}
168 
169 #endif /* __arm64__ */
170 
171 static kern_return_t
handle_get_arm32_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)172 handle_get_arm32_thread_state(thread_state_t            tstate,
173     mach_msg_type_number_t *  count,
174     const arm_saved_state_t * saved_state)
175 {
176 	if (*count < ARM_THREAD_STATE32_COUNT) {
177 		return KERN_INVALID_ARGUMENT;
178 	}
179 	if (!is_saved_state32(saved_state)) {
180 		return KERN_INVALID_ARGUMENT;
181 	}
182 
183 	(void)saved_state_to_thread_state32(saved_state, (arm_thread_state32_t *)tstate);
184 	*count = ARM_THREAD_STATE32_COUNT;
185 	return KERN_SUCCESS;
186 }
187 
188 static kern_return_t
handle_get_arm64_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)189 handle_get_arm64_thread_state(thread_state_t            tstate,
190     mach_msg_type_number_t *  count,
191     const arm_saved_state_t * saved_state)
192 {
193 	if (*count < ARM_THREAD_STATE64_COUNT) {
194 		return KERN_INVALID_ARGUMENT;
195 	}
196 	if (!is_saved_state64(saved_state)) {
197 		return KERN_INVALID_ARGUMENT;
198 	}
199 
200 	(void)saved_state_to_thread_state64(saved_state, (arm_thread_state64_t *)tstate);
201 	*count = ARM_THREAD_STATE64_COUNT;
202 	return KERN_SUCCESS;
203 }
204 
205 
206 static kern_return_t
handle_get_arm_thread_state(thread_state_t tstate,mach_msg_type_number_t * count,const arm_saved_state_t * saved_state)207 handle_get_arm_thread_state(thread_state_t            tstate,
208     mach_msg_type_number_t *  count,
209     const arm_saved_state_t * saved_state)
210 {
211 	/* In an arm64 world, this flavor can be used to retrieve the thread
212 	 * state of a 32-bit or 64-bit thread into a unified structure, but we
213 	 * need to support legacy clients who are only aware of 32-bit, so
214 	 * check the count to see what the client is expecting.
215 	 */
216 	if (*count < ARM_UNIFIED_THREAD_STATE_COUNT) {
217 		return handle_get_arm32_thread_state(tstate, count, saved_state);
218 	}
219 
220 	arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *) tstate;
221 	bzero(unified_state, sizeof(*unified_state));
222 #if __arm64__
223 	if (is_saved_state64(saved_state)) {
224 		unified_state->ash.flavor = ARM_THREAD_STATE64;
225 		unified_state->ash.count = ARM_THREAD_STATE64_COUNT;
226 		(void)saved_state_to_thread_state64(saved_state, thread_state64(unified_state));
227 	} else
228 #endif
229 	{
230 		unified_state->ash.flavor = ARM_THREAD_STATE32;
231 		unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
232 		(void)saved_state_to_thread_state32(saved_state, thread_state32(unified_state));
233 	}
234 	*count = ARM_UNIFIED_THREAD_STATE_COUNT;
235 	return KERN_SUCCESS;
236 }
237 
238 
239 static kern_return_t
handle_set_arm32_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)240 handle_set_arm32_thread_state(const thread_state_t   tstate,
241     mach_msg_type_number_t count,
242     arm_saved_state_t *    saved_state)
243 {
244 	if (count != ARM_THREAD_STATE32_COUNT) {
245 		return KERN_INVALID_ARGUMENT;
246 	}
247 
248 	(void)thread_state32_to_saved_state((const arm_thread_state32_t *)tstate, saved_state);
249 	return KERN_SUCCESS;
250 }
251 
252 static kern_return_t
handle_set_arm64_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)253 handle_set_arm64_thread_state(const thread_state_t   tstate,
254     mach_msg_type_number_t count,
255     arm_saved_state_t *    saved_state)
256 {
257 	if (count != ARM_THREAD_STATE64_COUNT) {
258 		return KERN_INVALID_ARGUMENT;
259 	}
260 
261 	(void)thread_state64_to_saved_state((const arm_thread_state64_t *)tstate, saved_state);
262 	return KERN_SUCCESS;
263 }
264 
265 
266 static kern_return_t
handle_set_arm_thread_state(const thread_state_t tstate,mach_msg_type_number_t count,arm_saved_state_t * saved_state)267 handle_set_arm_thread_state(const thread_state_t   tstate,
268     mach_msg_type_number_t count,
269     arm_saved_state_t *    saved_state)
270 {
271 	/* In an arm64 world, this flavor can be used to set the thread state of a
272 	 * 32-bit or 64-bit thread from a unified structure, but we need to support
273 	 * legacy clients who are only aware of 32-bit, so check the count to see
274 	 * what the client is expecting.
275 	 */
276 	if (count < ARM_UNIFIED_THREAD_STATE_COUNT) {
277 		if (!is_saved_state32(saved_state)) {
278 			return KERN_INVALID_ARGUMENT;
279 		}
280 		return handle_set_arm32_thread_state(tstate, count, saved_state);
281 	}
282 
283 	const arm_unified_thread_state_t *unified_state = (const arm_unified_thread_state_t *) tstate;
284 #if __arm64__
285 	if (is_thread_state64(unified_state)) {
286 		if (!is_saved_state64(saved_state)) {
287 			return KERN_INVALID_ARGUMENT;
288 		}
289 		(void)thread_state64_to_saved_state(const_thread_state64(unified_state), saved_state);
290 	} else
291 #endif
292 	{
293 		if (!is_saved_state32(saved_state)) {
294 			return KERN_INVALID_ARGUMENT;
295 		}
296 		(void)thread_state32_to_saved_state(const_thread_state32(unified_state), saved_state);
297 	}
298 
299 	return KERN_SUCCESS;
300 }
301 
302 
303 #if __has_feature(ptrauth_calls)
304 
/*
 * Derive a 32-bit sigreturn token for a user pc/lr value.
 *
 * The pointer is XORed with the thread's sigreturn secret, PAC-signed
 * with the process-independent data key (diversified by the string
 * discriminator "nonce" and the thread's jop_pid), and the upper 32
 * bits of the signed value are returned as the token.
 */
static inline uint32_t
thread_generate_sigreturn_token(
	void *ptr,
	thread_t thread)
{
	/* Bind the token to this thread via its per-thread secret. */
	user64_addr_t token = (user64_addr_t)ptr;
	token ^= (user64_addr_t)thread_get_sigreturn_token(thread);
	token = (user64_addr_t)pmap_sign_user_ptr((void*)token,
	    ptrauth_key_process_independent_data, ptrauth_string_discriminator("nonce"),
	    thread->machine.jop_pid);
	/* Keep only the upper half, where the PAC signature bits live. */
	token >>= 32;
	return (uint32_t)token;
}
318 #endif //__has_feature(ptrauth_calls)
319 
320 /*
321  * Translate thread state arguments to userspace representation
322  */
323 
/*
 * Convert in-kernel thread state to its userspace representation by
 * PAC-signing pc/lr/sp/fp for the target thread's user keys.
 *
 * thread:     target thread whose signing context (jop_pid) is used
 * flavor:     only ARM_THREAD_STATE / ARM_THREAD_STATE64 are converted;
 *             all other flavors pass through untouched
 * tstate:     state buffer, modified in place
 * count:      caller-supplied state size in words (validated, not changed)
 * tssf_flags: TSSF_* options controlling flag preservation, diversifier
 *             selection, and sigreturn-token stashing
 *
 * Always returns KERN_SUCCESS; non-convertible inputs are left as-is.
 */
kern_return_t
machine_thread_state_convert_to_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t *count,
	thread_set_status_flags_t tssf_flags)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;
	bool preserve_flags = !!(tssf_flags & TSSF_PRESERVE_FLAGS);
	bool stash_sigreturn_token = !!(tssf_flags & TSSF_STASH_SIGRETURN_TOKEN);
	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);
	uint32_t old_flags;
	bool kernel_signed_pc = true;
	bool kernel_signed_lr = true;
	uint32_t userland_diversifier = 0;

	/* Locate the 64-bit state within the buffer; bail out (success)
	 * for 32-bit or unrecognized flavors. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (*count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (*count < ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread()) ||
	    (thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) || !thread_is_64bit_addr(thread)
	    ) {
		/* Either side is JOP-disabled: hand back unsigned state. */
		ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
		return KERN_SUCCESS;
	}

	old_flags = ts64->flags;
	ts64->flags = 0;
	if (ts64->lr) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr != stripped_lr) {
			// Need to allow already-signed lr value to round-trip as is
			ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will round-trip correctly even if IA-signed again below (and IA-authd later)
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (preserve_flags) {
		/* Caller asked to carry opaque flags through unchanged; the
		 * diversifier-selection flags are mutually exclusive with this. */
		assert(random_div == false);
		assert(thread_div == false);

		/* Restore the diversifier and other opaque flags */
		ts64->flags |= (old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
		userland_diversifier = old_flags & __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC)) {
			kernel_signed_pc = false;
		}
		if (!(old_flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR)) {
			kernel_signed_lr = false;
		}
	} else {
		/* Set a non zero userland diversifier */
		if (random_div) {
			/* Loop until a non-zero masked random value is drawn. */
			do {
				read_random(&userland_diversifier, sizeof(userland_diversifier));
				userland_diversifier &=
				    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
			} while (userland_diversifier == 0);
		} else if (thread_div) {
			userland_diversifier = thread_get_sigreturn_diversifier(thread) &
			    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK;
		}
		ts64->flags |= userland_diversifier;
	}

	if (kernel_signed_pc) {
		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC;
	}

	if (kernel_signed_lr) {
		ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR;
	}

	/* Sign pc with the IA key; non-kernel-signed values additionally
	 * blend the userland diversifier into the discriminator. */
	if (ts64->pc) {
		uint64_t discriminator = ptrauth_string_discriminator("pc");
		if (!kernel_signed_pc && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
			    ptrauth_string_discriminator("pc"));
		}

		ts64->pc = (uintptr_t)pmap_sign_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	/* Same for lr, unless it is already IB-signed and must round-trip. */
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		uint64_t discriminator = ptrauth_string_discriminator("lr");
		if (!kernel_signed_lr && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator((void *)(long)userland_diversifier,
			    ptrauth_string_discriminator("lr"));
		}

		ts64->lr = (uintptr_t)pmap_sign_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	/* sp and fp are signed with the DA key and fixed discriminators. */
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_sign_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	/* Stash the sigreturn token */
	if (stash_sigreturn_token) {
		if (kernel_signed_pc) {
			uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK);
		}

		if (kernel_signed_lr) {
			uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
			__DARWIN_ARM_THREAD_STATE64_SET_SIGRETURN_TOKEN(ts64, token,
			    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK);
		}
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count; (void)tssf_flags;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
485 
486 #if __has_feature(ptrauth_calls)
487 extern char *   proc_name_address(void *p);
488 
489 CA_EVENT(pac_thread_state_exception_event,
490     CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
491 
/*
 * Compare incoming thread state against the previously-exported state
 * and emit a CoreAnalytics event when a kernel-signed pc/lr appears to
 * have been tampered with.  Telemetry only — no enforcement here.
 */
static void
machine_thread_state_check_pac_state(
	arm_thread_state64_t *ts64,
	arm_thread_state64_t *old_ts64)
{
	bool send_event = false;
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);
	char *proc_name = (char *) "unknown";

	/*
	 * Report when either:
	 *  - pc is flagged kernel-signed but differs from the old pc, or
	 *  - lr is not IB-signed, is flagged kernel-signed, and either
	 *    differs from the old lr or the old lr was IB-signed.
	 */
	if (((ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC) &&
	    ts64->pc != old_ts64->pc) || (!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
	    (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR) && (ts64->lr != old_ts64->lr ||
	    (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)))) {
		send_event = true;
	}

	if (!send_event) {
		return;
	}

	/* Attribute the event to the current process by name. */
	proc_name = proc_name_address(proc);
	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_exception_event);
	CA_EVENT_TYPE(pac_thread_state_exception_event) * pexc_event = ca_event->data;
	strlcpy(pexc_event->proc_name, proc_name, CA_PROCNAME_LEN);
	CA_EVENT_SEND(ca_event);
}
519 
520 CA_EVENT(pac_thread_state_sigreturn_event,
521     CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
522 
/*
 * Recompute the sigreturn tokens for kernel-signed pc/lr and compare
 * them against the tokens stashed in ts64.  Returns true when all
 * applicable tokens match; on mismatch, sends a CoreAnalytics event
 * and returns false.
 */
static bool
machine_thread_state_check_sigreturn_token(
	arm_thread_state64_t *ts64,
	thread_t thread)
{
	task_t task = current_task();
	void *proc = get_bsdtask_info(task);
	char *proc_name = (char *) "unknown";
	bool token_matched = true;
	bool kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
	bool kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);

	if (kernel_signed_pc) {
		/* Compute the sigreturn token */
		uint32_t token = thread_generate_sigreturn_token((void *)ts64->pc, thread);
		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_PC_MASK)) {
			token_matched = false;
		}
	}

	if (kernel_signed_lr) {
		/* Compute the sigreturn token */
		uint32_t token = thread_generate_sigreturn_token((void *)ts64->lr, thread);
		if (!__DARWIN_ARM_THREAD_STATE64_CHECK_SIGRETURN_TOKEN(ts64, token,
		    __DARWIN_ARM_THREAD_STATE64_SIGRETURN_LR_MASK)) {
			token_matched = false;
		}
	}

	if (token_matched) {
		return true;
	}

	/* Mismatch: report which process presented a bad token. */
	proc_name = proc_name_address(proc);
	ca_event_t ca_event = CA_EVENT_ALLOCATE(pac_thread_state_sigreturn_event);
	CA_EVENT_TYPE(pac_thread_state_sigreturn_event) * psig_event = ca_event->data;
	strlcpy(psig_event->proc_name, proc_name, CA_PROCNAME_LEN);
	CA_EVENT_SEND(ca_event);
	return false;
}
564 
565 #endif
566 
567 /*
568  * Translate thread state arguments from userspace representation
569  */
570 
/*
 * Convert a userspace thread state into the in-kernel representation
 * by authenticating (and unsigning) pc/lr/sp/fp.  An invalid signature
 * results in a poisoned pointer rather than an immediate error, so a
 * forged state cannot be installed usefully.
 *
 * thread:     target thread whose signing context (jop_pid) is used
 * flavor:     only ARM_THREAD_STATE / ARM_THREAD_STATE64 are converted
 * tstate:     incoming state buffer, modified in place
 * count:      incoming state size in words
 * old_tstate: previously-exported state, used for tamper checks and
 *             diversifier recovery (may be absent)
 * old_count:  size of old_tstate in words
 * tssf_flags: TSSF_* options controlling checks and diversifier choice
 *
 * Returns KERN_SUCCESS, or KERN_PROTECTION_FAILURE when a JOP-disabled
 * caller targets a JOP-enabled thread or a required sigreturn token
 * does not match.
 */
kern_return_t
machine_thread_state_convert_from_user(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t count,
	thread_state_t old_tstate,
	mach_msg_type_number_t old_count,
	thread_set_status_flags_t tssf_flags)
{
#if __has_feature(ptrauth_calls)
	arm_thread_state64_t *ts64;
	arm_thread_state64_t *old_ts64 = NULL;
	void *userland_diversifier = NULL;
	bool kernel_signed_pc;
	bool kernel_signed_lr;
	bool random_div = !!(tssf_flags & TSSF_RANDOM_USER_DIV);
	bool thread_div = !!(tssf_flags & TSSF_THREAD_USER_DIV);

	/* Locate the 64-bit state (and, when available, the matching old
	 * state); pass through 32-bit or unrecognized flavors untouched. */
	switch (flavor) {
	case ARM_THREAD_STATE:
	{
		arm_unified_thread_state_t *unified_state = (arm_unified_thread_state_t *)tstate;

		if (count < ARM_UNIFIED_THREAD_STATE_COUNT || !is_thread_state64(unified_state)) {
			return KERN_SUCCESS;
		}
		ts64 = thread_state64(unified_state);

		arm_unified_thread_state_t *old_unified_state = (arm_unified_thread_state_t *)old_tstate;
		if (old_unified_state && old_count >= ARM_UNIFIED_THREAD_STATE_COUNT) {
			old_ts64 = thread_state64(old_unified_state);
		}
		break;
	}
	case ARM_THREAD_STATE64:
	{
		if (count != ARM_THREAD_STATE64_COUNT) {
			return KERN_SUCCESS;
		}
		ts64 = (arm_thread_state64_t *)tstate;

		if (old_count == ARM_THREAD_STATE64_COUNT) {
			old_ts64 = (arm_thread_state64_t *)old_tstate;
		}
		break;
	}
	default:
		return KERN_SUCCESS;
	}

	// Note that kernel threads never have disable_user_jop set
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread())) {
		if ((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
		    !thread_is_64bit_addr(thread)) {
			/* Both sides JOP-disabled: accept unsigned state as-is. */
			ts64->flags = __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
			return KERN_SUCCESS;
		}
		// A JOP-disabled process must not set thread state on a JOP-enabled process
		return KERN_PROTECTION_FAILURE;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH) {
		if ((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
		    !thread_is_64bit_addr(thread)
		    ) {
			return KERN_SUCCESS;
		}
		// Disallow setting unsigned thread state on JOP-enabled processes.
		// Ignore flag and treat thread state arguments as signed, ptrauth
		// poisoning will cause resulting thread state to be invalid
		ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_NO_PTRAUTH;
	}

	if (ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
		// lr might contain an IB-signed return address (strip is a no-op on unsigned addresses)
		uintptr_t stripped_lr = (uintptr_t)ptrauth_strip((void *)ts64->lr,
		    ptrauth_key_return_address);
		if (ts64->lr == stripped_lr) {
			// Don't allow unsigned pointer to be passed through as is. Ignore flag and
			// treat as IA-signed below (where auth failure may poison the value).
			ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
		}
		// Note that an IB-signed return address that happens to have a 0 signature value
		// will also have been IA-signed (without this flag being set) and so will IA-auth
		// correctly below.
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	kernel_signed_pc = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_PC);
	kernel_signed_lr = !!(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_KERNEL_SIGNED_LR);
	/*
	 * Replace pc/lr with old state if allow only
	 * user ptr flag is passed and ptrs are marked
	 * kernel signed.
	 */
	if ((tssf_flags & TSSF_CHECK_USER_FLAGS) &&
	    (kernel_signed_pc || kernel_signed_lr)) {
		if (old_ts64 && old_count == count) {
			/* Send a CA event if the thread state does not match */
			machine_thread_state_check_pac_state(ts64, old_ts64);

			/* Check if user ptrs needs to be replaced */
			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
			    kernel_signed_pc) {
				ts64->pc = old_ts64->pc;
			}

			if ((tssf_flags & TSSF_ALLOW_ONLY_USER_PTRS) &&
			    !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) &&
			    kernel_signed_lr) {
				/* Take the old lr and mirror its IB-signed flag. */
				ts64->lr = old_ts64->lr;
				if (old_ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR) {
					ts64->flags |= __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
				} else {
					ts64->flags &= ~__DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR;
				}
			}
		}
	}

	/* Validate sigreturn token */
	if (tssf_flags & TSSF_CHECK_SIGRETURN_TOKEN) {
		bool token_matched = machine_thread_state_check_sigreturn_token(ts64, thread);
		if ((tssf_flags & TSSF_ALLOW_ONLY_MATCHING_TOKEN) && !token_matched) {
			return KERN_PROTECTION_FAILURE;
		}
	}

	/* Get the userland diversifier */
	if (random_div && old_ts64 && old_count == count) {
		/* Get the random diversifier from the old thread state */
		userland_diversifier = (void *)(long)(old_ts64->flags &
		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
	} else if (thread_div) {
		userland_diversifier = (void *)(long)(thread_get_sigreturn_diversifier(thread) &
		    __DARWIN_ARM_THREAD_STATE64_USER_DIVERSIFIER_MASK);
	}

	/* Authenticate pc with the IA key; non-kernel-signed values blend
	 * the userland diversifier into the discriminator, mirroring the
	 * convert_to_user signing path. */
	if (ts64->pc) {
		uint64_t discriminator = ptrauth_string_discriminator("pc");
		if (!kernel_signed_pc && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator(userland_diversifier,
			    ptrauth_string_discriminator("pc"));
		}
		ts64->pc = (uintptr_t)pmap_auth_user_ptr((void*)ts64->pc,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	/* Same for lr, unless it is IB-signed and passes through as-is. */
	if (ts64->lr && !(ts64->flags & __DARWIN_ARM_THREAD_STATE64_FLAGS_IB_SIGNED_LR)) {
		uint64_t discriminator = ptrauth_string_discriminator("lr");
		if (!kernel_signed_lr && userland_diversifier != 0) {
			discriminator = ptrauth_blend_discriminator(userland_diversifier,
			    ptrauth_string_discriminator("lr"));
		}
		ts64->lr = (uintptr_t)pmap_auth_user_ptr((void*)ts64->lr,
		    ptrauth_key_process_independent_code, discriminator,
		    thread->machine.jop_pid);
	}
	/* sp and fp authenticate with the DA key and fixed discriminators. */
	if (ts64->sp) {
		ts64->sp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->sp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("sp"),
		    thread->machine.jop_pid);
	}
	if (ts64->fp) {
		ts64->fp = (uintptr_t)pmap_auth_user_ptr((void*)ts64->fp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("fp"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)flavor; (void)tstate; (void)count;
	(void)old_tstate; (void)old_count; (void)tssf_flags;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
753 
754 #if __has_feature(ptrauth_calls)
755 bool
machine_thread_state_is_debug_flavor(int flavor)756 machine_thread_state_is_debug_flavor(int flavor)
757 {
758 	if (flavor == ARM_DEBUG_STATE ||
759 	    flavor == ARM_DEBUG_STATE64 ||
760 	    flavor == ARM_DEBUG_STATE32) {
761 		return true;
762 	}
763 	return false;
764 }
765 #endif /* __has_feature(ptrauth_calls) */
766 
767 /*
768  * Translate signal context data pointer to userspace representation
769  */
770 
/*
 * Sign a signal ucontext pointer for export to userspace.
 *
 * thread: target thread whose signing context (jop_pid) is used
 * uctxp:  in/out pointer; signed in place with the DA key and the
 *         "uctx" string discriminator when non-zero
 *
 * Always returns KERN_SUCCESS.  JOP-disabled or 32-bit callers leave
 * the pointer unchanged.
 */
kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	thread_t thread,
	user_addr_t *uctxp)
{
#if __has_feature(ptrauth_calls)
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread())) {
		/* Caller is JOP-disabled/32-bit; the target must be too. */
		assert((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) || !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	if (*uctxp) {
		*uctxp = (uintptr_t)pmap_sign_user_ptr((void*)*uctxp,
		    ptrauth_key_process_independent_data, ptrauth_string_discriminator("uctx"),
		    thread->machine.jop_pid);
	}

	return KERN_SUCCESS;
#else
	// No conversion to userspace representation on this platform
	(void)thread; (void)uctxp;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
800 
801 /*
802  * Translate array of function pointer syscall arguments from userspace representation
803  */
804 
/*
 * Authenticate an array of user-supplied function pointers in place.
 *
 * thread: target thread whose signing context (jop_pid) is used
 * fptrs:  array of pointers; each non-zero entry is authenticated with
 *         the function-pointer key (zero discriminator)
 * count:  number of entries in fptrs
 *
 * Always returns KERN_SUCCESS.  JOP-disabled or 32-bit callers leave
 * the pointers unchanged.
 */
kern_return_t
machine_thread_function_pointers_convert_from_user(
	thread_t thread,
	user_addr_t *fptrs,
	uint32_t count)
{
#if __has_feature(ptrauth_calls)
	if ((current_thread()->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
	    !thread_is_64bit_addr(current_thread())) {
		/* Caller is JOP-disabled/32-bit; the target must be too. */
		assert((thread->machine.arm_machine_flags & ARM_MACHINE_THREAD_DISABLE_USER_JOP) ||
		    !thread_is_64bit_addr(thread));
		return KERN_SUCCESS;
	}

	if (arm_user_jop_disabled()) {
		return KERN_SUCCESS;
	}

	while (count--) {
		if (*fptrs) {
			*fptrs = (uintptr_t)pmap_auth_user_ptr((void*)*fptrs,
			    ptrauth_key_function_pointer, 0, thread->machine.jop_pid);
		}
		fptrs++;
	}

	return KERN_SUCCESS;
#else
	// No conversion from userspace representation on this platform
	(void)thread; (void)fptrs; (void)count;
	return KERN_SUCCESS;
#endif /* __has_feature(ptrauth_calls) */
}
838 
839 /*
840  * Routine: machine_thread_get_state
841  *
842  */
843 kern_return_t
machine_thread_get_state(thread_t thread,thread_flavor_t flavor,thread_state_t tstate,mach_msg_type_number_t * count)844 machine_thread_get_state(thread_t                 thread,
845     thread_flavor_t          flavor,
846     thread_state_t           tstate,
847     mach_msg_type_number_t * count)
848 {
849 	switch (flavor) {
850 	case THREAD_STATE_FLAVOR_LIST:
851 		if (*count < 4) {
852 			return KERN_INVALID_ARGUMENT;
853 		}
854 
855 		tstate[0] = ARM_THREAD_STATE;
856 		tstate[1] = ARM_VFP_STATE;
857 		tstate[2] = ARM_EXCEPTION_STATE;
858 		tstate[3] = ARM_DEBUG_STATE;
859 		*count = 4;
860 		break;
861 
862 	case THREAD_STATE_FLAVOR_LIST_NEW:
863 		if (*count < 4) {
864 			return KERN_INVALID_ARGUMENT;
865 		}
866 
867 		tstate[0] = ARM_THREAD_STATE;
868 		tstate[1] = ARM_VFP_STATE;
869 		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
870 		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
871 		*count = 4;
872 		break;
873 
874 	case THREAD_STATE_FLAVOR_LIST_10_15:
875 		if (*count < 5) {
876 			return KERN_INVALID_ARGUMENT;
877 		}
878 
879 		tstate[0] = ARM_THREAD_STATE;
880 		tstate[1] = ARM_VFP_STATE;
881 		tstate[2] = thread_is_64bit_data(thread) ? ARM_EXCEPTION_STATE64 : ARM_EXCEPTION_STATE;
882 		tstate[3] = thread_is_64bit_data(thread) ? ARM_DEBUG_STATE64 : ARM_DEBUG_STATE32;
883 		tstate[4] = ARM_PAGEIN_STATE;
884 		*count = 5;
885 		break;
886 
887 	case ARM_THREAD_STATE:
888 	{
889 		kern_return_t rn = handle_get_arm_thread_state(tstate, count, thread->machine.upcb);
890 		if (rn) {
891 			return rn;
892 		}
893 		break;
894 	}
895 	case ARM_THREAD_STATE32:
896 	{
897 		if (thread_is_64bit_data(thread)) {
898 			return KERN_INVALID_ARGUMENT;
899 		}
900 
901 		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, thread->machine.upcb);
902 		if (rn) {
903 			return rn;
904 		}
905 		break;
906 	}
907 #if __arm64__
908 	case ARM_THREAD_STATE64:
909 	{
910 		if (!thread_is_64bit_data(thread)) {
911 			return KERN_INVALID_ARGUMENT;
912 		}
913 
914 		const arm_saved_state_t *current_state = thread->machine.upcb;
915 
916 		kern_return_t rn = handle_get_arm64_thread_state(tstate, count,
917 		    current_state);
918 		if (rn) {
919 			return rn;
920 		}
921 
922 		break;
923 	}
924 #endif
925 	case ARM_EXCEPTION_STATE:{
926 		struct arm_exception_state *state;
927 		struct arm_saved_state32 *saved_state;
928 
929 		if (*count < ARM_EXCEPTION_STATE_COUNT) {
930 			return KERN_INVALID_ARGUMENT;
931 		}
932 		if (thread_is_64bit_data(thread)) {
933 			return KERN_INVALID_ARGUMENT;
934 		}
935 
936 		state = (struct arm_exception_state *) tstate;
937 		saved_state = saved_state32(thread->machine.upcb);
938 
939 		state->exception = saved_state->exception;
940 		state->fsr = saved_state->esr;
941 		state->far = saved_state->far;
942 
943 		*count = ARM_EXCEPTION_STATE_COUNT;
944 		break;
945 	}
946 	case ARM_EXCEPTION_STATE64:{
947 		struct arm_exception_state64 *state;
948 		struct arm_saved_state64 *saved_state;
949 
950 		if (*count < ARM_EXCEPTION_STATE64_COUNT) {
951 			return KERN_INVALID_ARGUMENT;
952 		}
953 		if (!thread_is_64bit_data(thread)) {
954 			return KERN_INVALID_ARGUMENT;
955 		}
956 
957 		state = (struct arm_exception_state64 *) tstate;
958 		saved_state = saved_state64(thread->machine.upcb);
959 
960 		state->exception = saved_state->exception;
961 		state->far = saved_state->far;
962 		state->esr = saved_state->esr;
963 
964 		*count = ARM_EXCEPTION_STATE64_COUNT;
965 		break;
966 	}
967 	case ARM_DEBUG_STATE:{
968 		arm_legacy_debug_state_t *state;
969 		arm_debug_state32_t *thread_state;
970 
971 		if (*count < ARM_LEGACY_DEBUG_STATE_COUNT) {
972 			return KERN_INVALID_ARGUMENT;
973 		}
974 
975 		if (thread_is_64bit_data(thread)) {
976 			return KERN_INVALID_ARGUMENT;
977 		}
978 
979 		state = (arm_legacy_debug_state_t *) tstate;
980 		thread_state = find_debug_state32(thread);
981 
982 		if (thread_state == NULL) {
983 			bzero(state, sizeof(arm_legacy_debug_state_t));
984 		} else {
985 			bcopy(thread_state, state, sizeof(arm_legacy_debug_state_t));
986 		}
987 
988 		*count = ARM_LEGACY_DEBUG_STATE_COUNT;
989 		break;
990 	}
991 	case ARM_DEBUG_STATE32:{
992 		arm_debug_state32_t *state;
993 		arm_debug_state32_t *thread_state;
994 
995 		if (*count < ARM_DEBUG_STATE32_COUNT) {
996 			return KERN_INVALID_ARGUMENT;
997 		}
998 
999 		if (thread_is_64bit_data(thread)) {
1000 			return KERN_INVALID_ARGUMENT;
1001 		}
1002 
1003 		state = (arm_debug_state32_t *) tstate;
1004 		thread_state = find_debug_state32(thread);
1005 
1006 		if (thread_state == NULL) {
1007 			bzero(state, sizeof(arm_debug_state32_t));
1008 		} else {
1009 			bcopy(thread_state, state, sizeof(arm_debug_state32_t));
1010 		}
1011 
1012 		*count = ARM_DEBUG_STATE32_COUNT;
1013 		break;
1014 	}
1015 
1016 	case ARM_DEBUG_STATE64:{
1017 		arm_debug_state64_t *state;
1018 		arm_debug_state64_t *thread_state;
1019 
1020 		if (*count < ARM_DEBUG_STATE64_COUNT) {
1021 			return KERN_INVALID_ARGUMENT;
1022 		}
1023 
1024 		if (!thread_is_64bit_data(thread)) {
1025 			return KERN_INVALID_ARGUMENT;
1026 		}
1027 
1028 		state = (arm_debug_state64_t *) tstate;
1029 		thread_state = find_debug_state64(thread);
1030 
1031 		if (thread_state == NULL) {
1032 			bzero(state, sizeof(arm_debug_state64_t));
1033 		} else {
1034 			bcopy(thread_state, state, sizeof(arm_debug_state64_t));
1035 		}
1036 
1037 		*count = ARM_DEBUG_STATE64_COUNT;
1038 		break;
1039 	}
1040 
1041 	case ARM_VFP_STATE:{
1042 		struct arm_vfp_state *state;
1043 		arm_neon_saved_state32_t *thread_state;
1044 		unsigned int max;
1045 
1046 		if (*count < ARM_VFP_STATE_COUNT) {
1047 			if (*count < ARM_VFPV2_STATE_COUNT) {
1048 				return KERN_INVALID_ARGUMENT;
1049 			} else {
1050 				*count =  ARM_VFPV2_STATE_COUNT;
1051 			}
1052 		}
1053 
1054 		if (*count == ARM_VFPV2_STATE_COUNT) {
1055 			max = 32;
1056 		} else {
1057 			max = 64;
1058 		}
1059 
1060 		state = (struct arm_vfp_state *) tstate;
1061 		thread_state = neon_state32(thread->machine.uNeon);
1062 		/* ARM64 TODO: set fpsr and fpcr from state->fpscr */
1063 
1064 		bcopy(thread_state, state, (max + 1) * sizeof(uint32_t));
1065 		*count = (max + 1);
1066 		break;
1067 	}
1068 	case ARM_NEON_STATE:{
1069 		arm_neon_state_t *state;
1070 		arm_neon_saved_state32_t *thread_state;
1071 
1072 		if (*count < ARM_NEON_STATE_COUNT) {
1073 			return KERN_INVALID_ARGUMENT;
1074 		}
1075 
1076 		if (thread_is_64bit_data(thread)) {
1077 			return KERN_INVALID_ARGUMENT;
1078 		}
1079 
1080 		state = (arm_neon_state_t *)tstate;
1081 		thread_state = neon_state32(thread->machine.uNeon);
1082 
1083 		assert(sizeof(*thread_state) == sizeof(*state));
1084 		bcopy(thread_state, state, sizeof(arm_neon_state_t));
1085 
1086 		*count = ARM_NEON_STATE_COUNT;
1087 		break;
1088 	}
1089 
1090 	case ARM_NEON_STATE64:{
1091 		arm_neon_state64_t *state;
1092 		arm_neon_saved_state64_t *thread_state;
1093 
1094 		if (*count < ARM_NEON_STATE64_COUNT) {
1095 			return KERN_INVALID_ARGUMENT;
1096 		}
1097 
1098 		if (!thread_is_64bit_data(thread)) {
1099 			return KERN_INVALID_ARGUMENT;
1100 		}
1101 
1102 		state = (arm_neon_state64_t *)tstate;
1103 		thread_state = neon_state64(thread->machine.uNeon);
1104 
1105 		/* For now, these are identical */
1106 		assert(sizeof(*state) == sizeof(*thread_state));
1107 		bcopy(thread_state, state, sizeof(arm_neon_state64_t));
1108 
1109 
1110 		*count = ARM_NEON_STATE64_COUNT;
1111 		break;
1112 	}
1113 
1114 
1115 	case ARM_PAGEIN_STATE: {
1116 		arm_pagein_state_t *state;
1117 
1118 		if (*count < ARM_PAGEIN_STATE_COUNT) {
1119 			return KERN_INVALID_ARGUMENT;
1120 		}
1121 
1122 		state = (arm_pagein_state_t *)tstate;
1123 		state->__pagein_error = thread->t_pagein_error;
1124 
1125 		*count = ARM_PAGEIN_STATE_COUNT;
1126 		break;
1127 	}
1128 
1129 
1130 	default:
1131 		return KERN_INVALID_ARGUMENT;
1132 	}
1133 	return KERN_SUCCESS;
1134 }
1135 
1136 
1137 /*
1138  * Routine: machine_thread_get_kern_state
1139  *
1140  */
1141 kern_return_t
machine_thread_get_kern_state(thread_t thread,thread_flavor_t flavor,thread_state_t tstate,mach_msg_type_number_t * count)1142 machine_thread_get_kern_state(thread_t                 thread,
1143     thread_flavor_t          flavor,
1144     thread_state_t           tstate,
1145     mach_msg_type_number_t * count)
1146 {
1147 	/*
1148 	 * This works only for an interrupted kernel thread
1149 	 */
1150 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1151 		return KERN_FAILURE;
1152 	}
1153 
1154 	switch (flavor) {
1155 	case ARM_THREAD_STATE:
1156 	{
1157 		kern_return_t rn = handle_get_arm_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1158 		if (rn) {
1159 			return rn;
1160 		}
1161 		break;
1162 	}
1163 	case ARM_THREAD_STATE32:
1164 	{
1165 		kern_return_t rn = handle_get_arm32_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1166 		if (rn) {
1167 			return rn;
1168 		}
1169 		break;
1170 	}
1171 #if __arm64__
1172 	case ARM_THREAD_STATE64:
1173 	{
1174 		kern_return_t rn = handle_get_arm64_thread_state(tstate, count, getCpuDatap()->cpu_int_state);
1175 		if (rn) {
1176 			return rn;
1177 		}
1178 		break;
1179 	}
1180 #endif
1181 	default:
1182 		return KERN_INVALID_ARGUMENT;
1183 	}
1184 	return KERN_SUCCESS;
1185 }
1186 
1187 void
machine_thread_switch_addrmode(thread_t thread)1188 machine_thread_switch_addrmode(thread_t thread)
1189 {
1190 	if (task_has_64Bit_data(get_threadtask(thread))) {
1191 		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE64;
1192 		thread->machine.upcb->ash.count = ARM_SAVED_STATE64_COUNT;
1193 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1194 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1195 
1196 		/*
1197 		 * Reinitialize the NEON state.
1198 		 */
1199 		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1200 		thread->machine.uNeon->ns_64.fpcr = FPCR_DEFAULT;
1201 	} else {
1202 		thread->machine.upcb->ash.flavor = ARM_SAVED_STATE32;
1203 		thread->machine.upcb->ash.count = ARM_SAVED_STATE32_COUNT;
1204 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1205 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1206 
1207 		/*
1208 		 * Reinitialize the NEON state.
1209 		 */
1210 		bzero(&thread->machine.uNeon->uns, sizeof(thread->machine.uNeon->uns));
1211 		thread->machine.uNeon->ns_32.fpcr = FPCR_DEFAULT_32;
1212 	}
1213 }
1214 
1215 extern long long arm_debug_get(void);
1216 
1217 /*
1218  * Routine: machine_thread_set_state
1219  *
1220  */
1221 kern_return_t
machine_thread_set_state(thread_t thread,thread_flavor_t flavor,thread_state_t tstate,mach_msg_type_number_t count)1222 machine_thread_set_state(thread_t               thread,
1223     thread_flavor_t        flavor,
1224     thread_state_t         tstate,
1225     mach_msg_type_number_t count)
1226 {
1227 	kern_return_t rn;
1228 
1229 	switch (flavor) {
1230 	case ARM_THREAD_STATE:
1231 		rn = handle_set_arm_thread_state(tstate, count, thread->machine.upcb);
1232 		if (rn) {
1233 			return rn;
1234 		}
1235 		break;
1236 
1237 	case ARM_THREAD_STATE32:
1238 		if (thread_is_64bit_data(thread)) {
1239 			return KERN_INVALID_ARGUMENT;
1240 		}
1241 
1242 		rn = handle_set_arm32_thread_state(tstate, count, thread->machine.upcb);
1243 		if (rn) {
1244 			return rn;
1245 		}
1246 		break;
1247 
1248 #if __arm64__
1249 	case ARM_THREAD_STATE64:
1250 		if (!thread_is_64bit_data(thread)) {
1251 			return KERN_INVALID_ARGUMENT;
1252 		}
1253 
1254 
1255 		rn = handle_set_arm64_thread_state(tstate, count, thread->machine.upcb);
1256 		if (rn) {
1257 			return rn;
1258 		}
1259 		break;
1260 #endif
1261 	case ARM_EXCEPTION_STATE:{
1262 		if (count != ARM_EXCEPTION_STATE_COUNT) {
1263 			return KERN_INVALID_ARGUMENT;
1264 		}
1265 		if (thread_is_64bit_data(thread)) {
1266 			return KERN_INVALID_ARGUMENT;
1267 		}
1268 
1269 		break;
1270 	}
1271 	case ARM_EXCEPTION_STATE64:{
1272 		if (count != ARM_EXCEPTION_STATE64_COUNT) {
1273 			return KERN_INVALID_ARGUMENT;
1274 		}
1275 		if (!thread_is_64bit_data(thread)) {
1276 			return KERN_INVALID_ARGUMENT;
1277 		}
1278 
1279 		break;
1280 	}
1281 	case ARM_DEBUG_STATE:
1282 	{
1283 		arm_legacy_debug_state_t *state;
1284 		boolean_t enabled = FALSE;
1285 		unsigned int    i;
1286 
1287 		if (count != ARM_LEGACY_DEBUG_STATE_COUNT) {
1288 			return KERN_INVALID_ARGUMENT;
1289 		}
1290 		if (thread_is_64bit_data(thread)) {
1291 			return KERN_INVALID_ARGUMENT;
1292 		}
1293 
1294 		state = (arm_legacy_debug_state_t *) tstate;
1295 
1296 		for (i = 0; i < 16; i++) {
1297 			/* do not allow context IDs to be set */
1298 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1299 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1300 			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1301 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1302 				return KERN_PROTECTION_FAILURE;
1303 			}
1304 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1305 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1306 				enabled = TRUE;
1307 			}
1308 		}
1309 
1310 		if (!enabled) {
1311 			free_debug_state(thread);
1312 		} else {
1313 			arm_debug_state32_t *thread_state = find_or_allocate_debug_state32(thread);
1314 
1315 			if (thread_state == NULL) {
1316 				return KERN_FAILURE;
1317 			}
1318 
1319 			for (i = 0; i < 16; i++) {
1320 				/* set appropriate privilege; mask out unknown bits */
1321 				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1322 				    | ARM_DBGBCR_MATCH_MASK
1323 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1324 				    | ARM_DBG_CR_ENABLE_MASK))
1325 				    | ARM_DBGBCR_TYPE_IVA
1326 				    | ARM_DBG_CR_LINKED_UNLINKED
1327 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1328 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1329 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1330 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1331 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1332 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
1333 				    | ARM_DBG_CR_ENABLE_MASK))
1334 				    | ARM_DBG_CR_LINKED_UNLINKED
1335 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1336 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1337 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1338 			}
1339 
1340 			thread_state->mdscr_el1 = 0ULL;         // Legacy customers issuing ARM_DEBUG_STATE dont drive single stepping.
1341 		}
1342 
1343 		if (thread == current_thread()) {
1344 			arm_debug_set32(thread->machine.DebugData);
1345 		}
1346 
1347 		break;
1348 	}
1349 	case ARM_DEBUG_STATE32:
1350 		/* ARM64_TODO  subtle bcr/wcr semantic differences e.g. wcr and ARM_DBGBCR_TYPE_IVA */
1351 	{
1352 		arm_debug_state32_t *state;
1353 		boolean_t enabled = FALSE;
1354 		unsigned int    i;
1355 
1356 		if (count != ARM_DEBUG_STATE32_COUNT) {
1357 			return KERN_INVALID_ARGUMENT;
1358 		}
1359 		if (thread_is_64bit_data(thread)) {
1360 			return KERN_INVALID_ARGUMENT;
1361 		}
1362 
1363 		state = (arm_debug_state32_t *) tstate;
1364 
1365 		if (state->mdscr_el1 & MDSCR_SS) {
1366 			enabled = TRUE;
1367 		}
1368 
1369 		for (i = 0; i < 16; i++) {
1370 			/* do not allow context IDs to be set */
1371 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1372 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1373 			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1374 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1375 				return KERN_PROTECTION_FAILURE;
1376 			}
1377 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1378 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1379 				enabled = TRUE;
1380 			}
1381 		}
1382 
1383 		if (!enabled) {
1384 			free_debug_state(thread);
1385 		} else {
1386 			arm_debug_state32_t * thread_state = find_or_allocate_debug_state32(thread);
1387 
1388 			if (thread_state == NULL) {
1389 				return KERN_FAILURE;
1390 			}
1391 
1392 			if (state->mdscr_el1 & MDSCR_SS) {
1393 				thread_state->mdscr_el1 |= MDSCR_SS;
1394 			} else {
1395 				thread_state->mdscr_el1 &= ~MDSCR_SS;
1396 			}
1397 
1398 			for (i = 0; i < 16; i++) {
1399 				/* set appropriate privilege; mask out unknown bits */
1400 				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1401 				    | ARM_DBGBCR_MATCH_MASK
1402 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1403 				    | ARM_DBG_CR_ENABLE_MASK))
1404 				    | ARM_DBGBCR_TYPE_IVA
1405 				    | ARM_DBG_CR_LINKED_UNLINKED
1406 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1407 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1408 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1409 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1410 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1411 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
1412 				    | ARM_DBG_CR_ENABLE_MASK))
1413 				    | ARM_DBG_CR_LINKED_UNLINKED
1414 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1415 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1416 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
1417 			}
1418 		}
1419 
1420 		if (thread == current_thread()) {
1421 			arm_debug_set32(thread->machine.DebugData);
1422 		}
1423 
1424 		break;
1425 	}
1426 
1427 	case ARM_DEBUG_STATE64:
1428 	{
1429 		arm_debug_state64_t *state;
1430 		boolean_t enabled = FALSE;
1431 		unsigned int i;
1432 
1433 		if (count != ARM_DEBUG_STATE64_COUNT) {
1434 			return KERN_INVALID_ARGUMENT;
1435 		}
1436 		if (!thread_is_64bit_data(thread)) {
1437 			return KERN_INVALID_ARGUMENT;
1438 		}
1439 
1440 		state = (arm_debug_state64_t *) tstate;
1441 
1442 		if (state->mdscr_el1 & MDSCR_SS) {
1443 			enabled = TRUE;
1444 		}
1445 
1446 		for (i = 0; i < 16; i++) {
1447 			/* do not allow context IDs to be set */
1448 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
1449 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
1450 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
1451 				return KERN_PROTECTION_FAILURE;
1452 			}
1453 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
1454 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
1455 				enabled = TRUE;
1456 			}
1457 		}
1458 
1459 		if (!enabled) {
1460 			free_debug_state(thread);
1461 		} else {
1462 			arm_debug_state64_t *thread_state = find_or_allocate_debug_state64(thread);
1463 
1464 			if (thread_state == NULL) {
1465 				return KERN_FAILURE;
1466 			}
1467 
1468 			if (state->mdscr_el1 & MDSCR_SS) {
1469 				thread_state->mdscr_el1 |= MDSCR_SS;
1470 			} else {
1471 				thread_state->mdscr_el1 &= ~MDSCR_SS;
1472 			}
1473 
1474 			for (i = 0; i < 16; i++) {
1475 				/* set appropriate privilege; mask out unknown bits */
1476 				thread_state->bcr[i] = (state->bcr[i] & (0         /* Was ARM_DBG_CR_ADDRESS_MASK_MASK deprecated in v8 */
1477 				    | 0                             /* Was ARM_DBGBCR_MATCH_MASK, ignored in AArch64 state */
1478 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
1479 				    | ARM_DBG_CR_ENABLE_MASK))
1480 				    | ARM_DBGBCR_TYPE_IVA
1481 				    | ARM_DBG_CR_LINKED_UNLINKED
1482 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1483 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1484 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1485 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
1486 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
1487 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
1488 				    | ARM_DBG_CR_ENABLE_MASK))
1489 				    | ARM_DBG_CR_LINKED_UNLINKED
1490 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
1491 				    | ARM_DBG_CR_MODE_CONTROL_USER;
1492 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK64;
1493 			}
1494 		}
1495 
1496 		if (thread == current_thread()) {
1497 			arm_debug_set64(thread->machine.DebugData);
1498 		}
1499 
1500 		break;
1501 	}
1502 
1503 	case ARM_VFP_STATE:{
1504 		struct arm_vfp_state *state;
1505 		arm_neon_saved_state32_t *thread_state;
1506 		unsigned int    max;
1507 
1508 		if (count != ARM_VFP_STATE_COUNT && count != ARM_VFPV2_STATE_COUNT) {
1509 			return KERN_INVALID_ARGUMENT;
1510 		}
1511 
1512 		if (count == ARM_VFPV2_STATE_COUNT) {
1513 			max = 32;
1514 		} else {
1515 			max = 64;
1516 		}
1517 
1518 		state = (struct arm_vfp_state *) tstate;
1519 		thread_state = neon_state32(thread->machine.uNeon);
1520 		/* ARM64 TODO: combine fpsr and fpcr into state->fpscr */
1521 
1522 		bcopy(state, thread_state, (max + 1) * sizeof(uint32_t));
1523 
1524 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1525 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1526 		break;
1527 	}
1528 
1529 	case ARM_NEON_STATE:{
1530 		arm_neon_state_t *state;
1531 		arm_neon_saved_state32_t *thread_state;
1532 
1533 		if (count != ARM_NEON_STATE_COUNT) {
1534 			return KERN_INVALID_ARGUMENT;
1535 		}
1536 
1537 		if (thread_is_64bit_data(thread)) {
1538 			return KERN_INVALID_ARGUMENT;
1539 		}
1540 
1541 		state = (arm_neon_state_t *)tstate;
1542 		thread_state = neon_state32(thread->machine.uNeon);
1543 
1544 		assert(sizeof(*state) == sizeof(*thread_state));
1545 		bcopy(state, thread_state, sizeof(arm_neon_state_t));
1546 
1547 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE32;
1548 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE32_COUNT;
1549 		break;
1550 	}
1551 
1552 	case ARM_NEON_STATE64:{
1553 		arm_neon_state64_t *state;
1554 		arm_neon_saved_state64_t *thread_state;
1555 
1556 		if (count != ARM_NEON_STATE64_COUNT) {
1557 			return KERN_INVALID_ARGUMENT;
1558 		}
1559 
1560 		if (!thread_is_64bit_data(thread)) {
1561 			return KERN_INVALID_ARGUMENT;
1562 		}
1563 
1564 		state = (arm_neon_state64_t *)tstate;
1565 		thread_state = neon_state64(thread->machine.uNeon);
1566 
1567 		assert(sizeof(*state) == sizeof(*thread_state));
1568 		bcopy(state, thread_state, sizeof(arm_neon_state64_t));
1569 
1570 
1571 		thread->machine.uNeon->nsh.flavor = ARM_NEON_SAVED_STATE64;
1572 		thread->machine.uNeon->nsh.count = ARM_NEON_SAVED_STATE64_COUNT;
1573 		break;
1574 	}
1575 
1576 
1577 	default:
1578 		return KERN_INVALID_ARGUMENT;
1579 	}
1580 	return KERN_SUCCESS;
1581 }
1582 
1583 mach_vm_address_t
machine_thread_pc(thread_t thread)1584 machine_thread_pc(thread_t thread)
1585 {
1586 	struct arm_saved_state *ss = get_user_regs(thread);
1587 	return (mach_vm_address_t)get_saved_state_pc(ss);
1588 }
1589 
1590 void
machine_thread_reset_pc(thread_t thread,mach_vm_address_t pc)1591 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
1592 {
1593 	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
1594 }
1595 
1596 /*
1597  * Routine: machine_thread_state_initialize
1598  *
1599  */
1600 void
machine_thread_state_initialize(thread_t thread)1601 machine_thread_state_initialize(thread_t thread)
1602 {
1603 	arm_context_t *context = thread->machine.contextData;
1604 
1605 	/*
1606 	 * Should always be set up later. For a kernel thread, we don't care
1607 	 * about this state. For a user thread, we'll set the state up in
1608 	 * setup_wqthread, bsdthread_create, load_main(), or load_unixthread().
1609 	 */
1610 
1611 	if (context != NULL) {
1612 		bzero(&context->ss.uss, sizeof(context->ss.uss));
1613 		bzero(&context->ns.uns, sizeof(context->ns.uns));
1614 
1615 		if (context->ns.nsh.flavor == ARM_NEON_SAVED_STATE64) {
1616 			context->ns.ns_64.fpcr = FPCR_DEFAULT;
1617 		} else {
1618 			context->ns.ns_32.fpcr = FPCR_DEFAULT_32;
1619 		}
1620 		context->ss.ss_64.cpsr = PSR64_USER64_DEFAULT;
1621 	}
1622 
1623 	thread->machine.DebugData = NULL;
1624 
1625 #if defined(HAS_APPLE_PAC)
1626 	/* Sign the initial user-space thread state */
1627 	if (thread->machine.upcb != NULL) {
1628 		uint64_t intr = ml_pac_safe_interrupts_disable();
1629 		asm volatile (
1630                         "mov	x0, %[iss]"             "\n"
1631                         "mov	x1, #0"                 "\n"
1632                         "mov	w2, %w[usr]"            "\n"
1633                         "mov	x3, #0"                 "\n"
1634                         "mov	x4, #0"                 "\n"
1635                         "mov	x5, #0"                 "\n"
1636                         "mov	x6, lr"                 "\n"
1637                         "msr	SPSel, #1"              "\n"
1638                         "bl     _ml_sign_thread_state"  "\n"
1639                         "msr	SPSel, #0"              "\n"
1640                         "mov	lr, x6"                 "\n"
1641                         :
1642                         : [iss] "r"(thread->machine.upcb), [usr] "r"(thread->machine.upcb->ss_64.cpsr)
1643                         : "x0", "x1", "x2", "x3", "x4", "x5", "x6"
1644                 );
1645 		ml_pac_safe_interrupts_restore(intr);
1646 	}
1647 #endif /* defined(HAS_APPLE_PAC) */
1648 }
1649 
1650 /*
1651  * Routine: machine_thread_dup
1652  *
1653  */
1654 kern_return_t
machine_thread_dup(thread_t self,thread_t target,__unused boolean_t is_corpse)1655 machine_thread_dup(thread_t self,
1656     thread_t target,
1657     __unused boolean_t is_corpse)
1658 {
1659 	struct arm_saved_state *self_saved_state;
1660 	struct arm_saved_state *target_saved_state;
1661 
1662 	target->machine.cthread_self = self->machine.cthread_self;
1663 
1664 	self_saved_state = self->machine.upcb;
1665 	target_saved_state = target->machine.upcb;
1666 	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
1667 #if defined(HAS_APPLE_PAC)
1668 	if (!is_corpse && is_saved_state64(self_saved_state)) {
1669 		check_and_sign_copied_thread_state(target_saved_state, self_saved_state);
1670 	}
1671 #endif /* defined(HAS_APPLE_PAC) */
1672 
1673 	arm_neon_saved_state_t *self_neon_state = self->machine.uNeon;
1674 	arm_neon_saved_state_t *target_neon_state = target->machine.uNeon;
1675 	bcopy(self_neon_state, target_neon_state, sizeof(*target_neon_state));
1676 
1677 
1678 	return KERN_SUCCESS;
1679 }
1680 
1681 /*
1682  * Routine: get_user_regs
1683  *
1684  */
1685 struct arm_saved_state *
get_user_regs(thread_t thread)1686 get_user_regs(thread_t thread)
1687 {
1688 	return thread->machine.upcb;
1689 }
1690 
1691 arm_neon_saved_state_t *
get_user_neon_regs(thread_t thread)1692 get_user_neon_regs(thread_t thread)
1693 {
1694 	return thread->machine.uNeon;
1695 }
1696 
1697 /*
1698  * Routine: find_user_regs
1699  *
1700  */
1701 struct arm_saved_state *
find_user_regs(thread_t thread)1702 find_user_regs(thread_t thread)
1703 {
1704 	return thread->machine.upcb;
1705 }
1706 
1707 /*
1708  * Routine: find_kern_regs
1709  *
1710  */
1711 struct arm_saved_state *
find_kern_regs(thread_t thread)1712 find_kern_regs(thread_t thread)
1713 {
1714 	/*
1715 	 * This works only for an interrupted kernel thread
1716 	 */
1717 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
1718 		return (struct arm_saved_state *) NULL;
1719 	} else {
1720 		return getCpuDatap()->cpu_int_state;
1721 	}
1722 }
1723 
1724 arm_debug_state32_t *
find_debug_state32(thread_t thread)1725 find_debug_state32(thread_t thread)
1726 {
1727 	if (thread && thread->machine.DebugData) {
1728 		return &(thread->machine.DebugData->uds.ds32);
1729 	} else {
1730 		return NULL;
1731 	}
1732 }
1733 
1734 arm_debug_state64_t *
find_debug_state64(thread_t thread)1735 find_debug_state64(thread_t thread)
1736 {
1737 	if (thread && thread->machine.DebugData) {
1738 		return &(thread->machine.DebugData->uds.ds64);
1739 	} else {
1740 		return NULL;
1741 	}
1742 }
1743 
1744 os_refgrp_decl(static, dbg_refgrp, "arm_debug_state", NULL);
1745 
1746 /**
1747  *  Finds the debug state for the given 64 bit thread, allocating one if it
1748  *  does not exist.
1749  *
1750  *  @param thread 64 bit thread to find or allocate debug state for
1751  *
1752  *  @returns A pointer to the given thread's 64 bit debug state or a null
1753  *           pointer if the given thread is null or the allocation of a new
1754  *           debug state fails.
1755  */
1756 arm_debug_state64_t *
find_or_allocate_debug_state64(thread_t thread)1757 find_or_allocate_debug_state64(thread_t thread)
1758 {
1759 	arm_debug_state64_t *thread_state = find_debug_state64(thread);
1760 	if (thread != NULL && thread_state == NULL) {
1761 		thread->machine.DebugData = zalloc_flags(ads_zone,
1762 		    Z_WAITOK | Z_NOFAIL);
1763 		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1764 		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE64;
1765 		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE64_COUNT;
1766 		os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1767 		thread_state = find_debug_state64(thread);
1768 	}
1769 	return thread_state;
1770 }
1771 
1772 /**
1773  *  Finds the debug state for the given 32 bit thread, allocating one if it
1774  *  does not exist.
1775  *
1776  *  @param thread 32 bit thread to find or allocate debug state for
1777  *
1778  *  @returns A pointer to the given thread's 32 bit debug state or a null
1779  *           pointer if the given thread is null or the allocation of a new
1780  *           debug state fails.
1781  */
1782 arm_debug_state32_t *
find_or_allocate_debug_state32(thread_t thread)1783 find_or_allocate_debug_state32(thread_t thread)
1784 {
1785 	arm_debug_state32_t *thread_state = find_debug_state32(thread);
1786 	if (thread != NULL && thread_state == NULL) {
1787 		thread->machine.DebugData = zalloc_flags(ads_zone,
1788 		    Z_WAITOK | Z_NOFAIL);
1789 		bzero(thread->machine.DebugData, sizeof *(thread->machine.DebugData));
1790 		thread->machine.DebugData->dsh.flavor = ARM_DEBUG_STATE32;
1791 		thread->machine.DebugData->dsh.count = ARM_DEBUG_STATE32_COUNT;
1792 		os_ref_init(&thread->machine.DebugData->ref, &dbg_refgrp);
1793 		thread_state = find_debug_state32(thread);
1794 	}
1795 	return thread_state;
1796 }
1797 
1798 /**
1799  *	Frees a thread's debug state if allocated. Otherwise does nothing.
1800  *
1801  *  @param thread thread to free the debug state of
1802  */
1803 static inline void
free_debug_state(thread_t thread)1804 free_debug_state(thread_t thread)
1805 {
1806 	if (thread != NULL && thread->machine.DebugData != NULL) {
1807 		arm_debug_state_t *pTmp = thread->machine.DebugData;
1808 		thread->machine.DebugData = NULL;
1809 
1810 		if (os_ref_release(&pTmp->ref) == 0) {
1811 			zfree(ads_zone, pTmp);
1812 		}
1813 	}
1814 }
1815 
1816 /*
1817  * Routine: thread_userstack
1818  *
1819  */
1820 kern_return_t
thread_userstack(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * user_stack,int * customstack,boolean_t is_64bit_data)1821 thread_userstack(__unused thread_t  thread,
1822     int                flavor,
1823     thread_state_t     tstate,
1824     unsigned int       count,
1825     mach_vm_offset_t * user_stack,
1826     int *              customstack,
1827     boolean_t          is_64bit_data
1828     )
1829 {
1830 	register_t sp;
1831 
1832 	switch (flavor) {
1833 	case ARM_THREAD_STATE:
1834 		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
1835 #if __arm64__
1836 			if (is_64bit_data) {
1837 				sp = ((arm_unified_thread_state_t *)tstate)->ts_64.sp;
1838 			} else
1839 #endif
1840 			{
1841 				sp = ((arm_unified_thread_state_t *)tstate)->ts_32.sp;
1842 			}
1843 
1844 			break;
1845 		}
1846 
1847 		/* INTENTIONAL FALL THROUGH (see machine_thread_set_state) */
1848 		OS_FALLTHROUGH;
1849 	case ARM_THREAD_STATE32:
1850 		if (count != ARM_THREAD_STATE32_COUNT) {
1851 			return KERN_INVALID_ARGUMENT;
1852 		}
1853 		if (is_64bit_data) {
1854 			return KERN_INVALID_ARGUMENT;
1855 		}
1856 
1857 		sp = ((arm_thread_state32_t *)tstate)->sp;
1858 		break;
1859 #if __arm64__
1860 	case ARM_THREAD_STATE64:
1861 		if (count != ARM_THREAD_STATE64_COUNT) {
1862 			return KERN_INVALID_ARGUMENT;
1863 		}
1864 		if (!is_64bit_data) {
1865 			return KERN_INVALID_ARGUMENT;
1866 		}
1867 
1868 		sp = ((arm_thread_state32_t *)tstate)->sp;
1869 		break;
1870 #endif
1871 	default:
1872 		return KERN_INVALID_ARGUMENT;
1873 	}
1874 
1875 	if (sp) {
1876 		*user_stack = CAST_USER_ADDR_T(sp);
1877 		if (customstack) {
1878 			*customstack = 1;
1879 		}
1880 	} else {
1881 		*user_stack = CAST_USER_ADDR_T(USRSTACK64);
1882 		if (customstack) {
1883 			*customstack = 0;
1884 		}
1885 	}
1886 
1887 	return KERN_SUCCESS;
1888 }
1889 
1890 /*
1891  * thread_userstackdefault:
1892  *
1893  * Return the default stack location for the
1894  * thread, if otherwise unknown.
1895  */
1896 kern_return_t
thread_userstackdefault(mach_vm_offset_t * default_user_stack,boolean_t is64bit)1897 thread_userstackdefault(mach_vm_offset_t * default_user_stack,
1898     boolean_t          is64bit)
1899 {
1900 	if (is64bit) {
1901 		*default_user_stack = USRSTACK64;
1902 	} else {
1903 		*default_user_stack = USRSTACK;
1904 	}
1905 
1906 	return KERN_SUCCESS;
1907 }
1908 
1909 /*
1910  * Routine: thread_setuserstack
1911  *
1912  */
1913 void
thread_setuserstack(thread_t thread,mach_vm_address_t user_stack)1914 thread_setuserstack(thread_t          thread,
1915     mach_vm_address_t user_stack)
1916 {
1917 	struct arm_saved_state *sv;
1918 
1919 	sv = get_user_regs(thread);
1920 
1921 	set_saved_state_sp(sv, user_stack);
1922 
1923 	return;
1924 }
1925 
1926 /*
1927  * Routine: thread_adjuserstack
1928  *
1929  */
1930 user_addr_t
thread_adjuserstack(thread_t thread,int adjust)1931 thread_adjuserstack(thread_t thread,
1932     int      adjust)
1933 {
1934 	struct arm_saved_state *sv;
1935 	uint64_t sp;
1936 
1937 	sv = get_user_regs(thread);
1938 
1939 	sp = get_saved_state_sp(sv);
1940 	sp += adjust;
1941 	set_saved_state_sp(sv, sp);
1942 
1943 	return sp;
1944 }
1945 
1946 
1947 /*
1948  * Routine: thread_setentrypoint
1949  *
1950  */
1951 void
thread_setentrypoint(thread_t thread,mach_vm_offset_t entry)1952 thread_setentrypoint(thread_t         thread,
1953     mach_vm_offset_t entry)
1954 {
1955 	struct arm_saved_state *sv;
1956 
1957 #if HAS_APPLE_PAC
1958 	uint64_t intr = ml_pac_safe_interrupts_disable();
1959 #endif
1960 
1961 	sv = get_user_regs(thread);
1962 
1963 	set_saved_state_pc(sv, entry);
1964 
1965 #if HAS_APPLE_PAC
1966 	ml_pac_safe_interrupts_restore(intr);
1967 #endif
1968 
1969 	return;
1970 }
1971 
1972 /*
1973  * Routine: thread_entrypoint
1974  *
1975  */
1976 kern_return_t
thread_entrypoint(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * entry_point)1977 thread_entrypoint(__unused thread_t  thread,
1978     int                flavor,
1979     thread_state_t     tstate,
1980     unsigned int       count,
1981     mach_vm_offset_t * entry_point
1982     )
1983 {
1984 	switch (flavor) {
1985 	case ARM_THREAD_STATE:
1986 	{
1987 		struct arm_thread_state *state;
1988 
1989 		if (count != ARM_THREAD_STATE_COUNT) {
1990 			return KERN_INVALID_ARGUMENT;
1991 		}
1992 
1993 		state = (struct arm_thread_state *) tstate;
1994 
1995 		/*
1996 		 * If a valid entry point is specified, use it.
1997 		 */
1998 		if (state->pc) {
1999 			*entry_point = CAST_USER_ADDR_T(state->pc);
2000 		} else {
2001 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
2002 		}
2003 	}
2004 	break;
2005 
2006 	case ARM_THREAD_STATE64:
2007 	{
2008 		struct arm_thread_state64 *state;
2009 
2010 		if (count != ARM_THREAD_STATE64_COUNT) {
2011 			return KERN_INVALID_ARGUMENT;
2012 		}
2013 
2014 		state = (struct arm_thread_state64*) tstate;
2015 
2016 		/*
2017 		 * If a valid entry point is specified, use it.
2018 		 */
2019 		if (state->pc) {
2020 			*entry_point = CAST_USER_ADDR_T(state->pc);
2021 		} else {
2022 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
2023 		}
2024 
2025 		break;
2026 	}
2027 	default:
2028 		return KERN_INVALID_ARGUMENT;
2029 	}
2030 
2031 	return KERN_SUCCESS;
2032 }
2033 
2034 
2035 /*
2036  * Routine: thread_set_child
2037  *
2038  */
2039 void
thread_set_child(thread_t child,int pid)2040 thread_set_child(thread_t child,
2041     int      pid)
2042 {
2043 	struct arm_saved_state *child_state;
2044 
2045 	child_state = get_user_regs(child);
2046 
2047 	set_saved_state_reg(child_state, 0, pid);
2048 	set_saved_state_reg(child_state, 1, 1ULL);
2049 }
2050 
2051 
2052 /*
2053  * Routine: thread_set_parent
2054  *
2055  */
2056 void
thread_set_parent(thread_t parent,int pid)2057 thread_set_parent(thread_t parent,
2058     int      pid)
2059 {
2060 	struct arm_saved_state *parent_state;
2061 
2062 	parent_state = get_user_regs(parent);
2063 
2064 	set_saved_state_reg(parent_state, 0, pid);
2065 	set_saved_state_reg(parent_state, 1, 0);
2066 }
2067 
2068 
/*
 * Thread context snapshot captured by act_thread_csave() and re-applied
 * by act_thread_catt(): the unified integer state plus, when VFP is
 * compiled in, the NEON/FP register state.
 */
struct arm_act_context {
	struct arm_unified_thread_state ss;
#if __ARM_VFP__
	struct arm_neon_saved_state ns;
#endif
};
2075 
2076 /*
2077  * Routine: act_thread_csave
2078  *
2079  */
2080 void *
act_thread_csave(void)2081 act_thread_csave(void)
2082 {
2083 	struct arm_act_context *ic;
2084 	kern_return_t   kret;
2085 	unsigned int    val;
2086 	thread_t thread = current_thread();
2087 
2088 	ic = kalloc_type(struct arm_act_context, Z_WAITOK);
2089 	if (ic == (struct arm_act_context *) NULL) {
2090 		return (void *) 0;
2091 	}
2092 
2093 	val = ARM_UNIFIED_THREAD_STATE_COUNT;
2094 	kret = machine_thread_get_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, &val);
2095 	if (kret != KERN_SUCCESS) {
2096 		kfree_type(struct arm_act_context, ic);
2097 		return (void *) 0;
2098 	}
2099 
2100 #if __ARM_VFP__
2101 	if (thread_is_64bit_data(thread)) {
2102 		val = ARM_NEON_STATE64_COUNT;
2103 		kret = machine_thread_get_state(thread,
2104 		    ARM_NEON_STATE64,
2105 		    (thread_state_t)&ic->ns,
2106 		    &val);
2107 	} else {
2108 		val = ARM_NEON_STATE_COUNT;
2109 		kret = machine_thread_get_state(thread,
2110 		    ARM_NEON_STATE,
2111 		    (thread_state_t)&ic->ns,
2112 		    &val);
2113 	}
2114 	if (kret != KERN_SUCCESS) {
2115 		kfree_type(struct arm_act_context, ic);
2116 		return (void *) 0;
2117 	}
2118 #endif
2119 	return ic;
2120 }
2121 
2122 /*
2123  * Routine: act_thread_catt
2124  *
2125  */
2126 void
act_thread_catt(void * ctx)2127 act_thread_catt(void * ctx)
2128 {
2129 	struct arm_act_context *ic;
2130 	kern_return_t   kret;
2131 	thread_t thread = current_thread();
2132 
2133 	ic = (struct arm_act_context *) ctx;
2134 	if (ic == (struct arm_act_context *) NULL) {
2135 		return;
2136 	}
2137 
2138 	kret = machine_thread_set_state(thread, ARM_THREAD_STATE, (thread_state_t)&ic->ss, ARM_UNIFIED_THREAD_STATE_COUNT);
2139 	if (kret != KERN_SUCCESS) {
2140 		goto out;
2141 	}
2142 
2143 #if __ARM_VFP__
2144 	if (thread_is_64bit_data(thread)) {
2145 		kret = machine_thread_set_state(thread,
2146 		    ARM_NEON_STATE64,
2147 		    (thread_state_t)&ic->ns,
2148 		    ARM_NEON_STATE64_COUNT);
2149 	} else {
2150 		kret = machine_thread_set_state(thread,
2151 		    ARM_NEON_STATE,
2152 		    (thread_state_t)&ic->ns,
2153 		    ARM_NEON_STATE_COUNT);
2154 	}
2155 	if (kret != KERN_SUCCESS) {
2156 		goto out;
2157 	}
2158 #endif
2159 out:
2160 	kfree_type(struct arm_act_context, ic);
2161 }
2162 
/*
 * Routine: act_thread_cfree
 *
 */
void
act_thread_cfree(void *ctx)
{
	/* Discard a context captured by act_thread_csave() without applying it. */
	kfree_type(struct arm_act_context, ctx);
}
2172 
2173 kern_return_t
thread_set_wq_state32(thread_t thread,thread_state_t tstate)2174 thread_set_wq_state32(thread_t       thread,
2175     thread_state_t tstate)
2176 {
2177 	arm_thread_state_t *state;
2178 	struct arm_saved_state *saved_state;
2179 	struct arm_saved_state32 *saved_state_32;
2180 	thread_t curth = current_thread();
2181 	spl_t s = 0;
2182 
2183 	assert(!thread_is_64bit_data(thread));
2184 
2185 	saved_state = thread->machine.upcb;
2186 	saved_state_32 = saved_state32(saved_state);
2187 
2188 	state = (arm_thread_state_t *)tstate;
2189 
2190 	if (curth != thread) {
2191 		s = splsched();
2192 		thread_lock(thread);
2193 	}
2194 
2195 	/*
2196 	 * do not zero saved_state, it can be concurrently accessed
2197 	 * and zero is not a valid state for some of the registers,
2198 	 * like sp.
2199 	 */
2200 	thread_state32_to_saved_state(state, saved_state);
2201 	saved_state_32->cpsr = PSR64_USER32_DEFAULT;
2202 
2203 	if (curth != thread) {
2204 		thread_unlock(thread);
2205 		splx(s);
2206 	}
2207 
2208 	return KERN_SUCCESS;
2209 }
2210 
2211 kern_return_t
thread_set_wq_state64(thread_t thread,thread_state_t tstate)2212 thread_set_wq_state64(thread_t       thread,
2213     thread_state_t tstate)
2214 {
2215 	arm_thread_state64_t *state;
2216 	struct arm_saved_state *saved_state;
2217 	struct arm_saved_state64 *saved_state_64;
2218 	thread_t curth = current_thread();
2219 	spl_t s = 0;
2220 
2221 	assert(thread_is_64bit_data(thread));
2222 
2223 	saved_state = thread->machine.upcb;
2224 	saved_state_64 = saved_state64(saved_state);
2225 	state = (arm_thread_state64_t *)tstate;
2226 
2227 	if (curth != thread) {
2228 		s = splsched();
2229 		thread_lock(thread);
2230 	}
2231 
2232 	/*
2233 	 * do not zero saved_state, it can be concurrently accessed
2234 	 * and zero is not a valid state for some of the registers,
2235 	 * like sp.
2236 	 */
2237 	thread_state64_to_saved_state(state, saved_state);
2238 	set_saved_state_cpsr(saved_state, PSR64_USER64_DEFAULT);
2239 
2240 	if (curth != thread) {
2241 		thread_unlock(thread);
2242 		splx(s);
2243 	}
2244 
2245 	return KERN_SUCCESS;
2246 }
2247