xref: /xnu-8019.80.24/osfmk/arm/status.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
37 
/*
 * Legacy VFPv2 register-file layout: 32 single-precision registers plus
 * the FPSCR status/control word.  Kept so callers that pass the smaller
 * VFPv2-sized buffer to the ARM_VFP_STATE flavor continue to work.
 */
struct arm_vfpv2_state {
	__uint32_t        __r[32];
	__uint32_t        __fpscr;
};

typedef struct arm_vfpv2_state  arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t in 32-bit words (Mach state counts are in words). */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
48 
49 /*
50  * Forward definitions
51  */
52 void
53 thread_set_child(thread_t child, int pid);
54 
55 void
56 thread_set_parent(thread_t parent, int pid);
57 
58 /*
59  * Maps state flavor to number of words in the state:
60  */
61 /* __private_extern__ */
62 unsigned int    _MachineStateCount[] = {
63 	/* FLAVOR_LIST */ 0,
64 	[ARM_THREAD_STATE]    = ARM_THREAD_STATE_COUNT,
65 	[ARM_VFP_STATE]       = ARM_VFP_STATE_COUNT,
66 	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
67 	[ARM_DEBUG_STATE]     = ARM_DEBUG_STATE_COUNT,
68 	[ARM_PAGEIN_STATE]    = ARM_PAGEIN_STATE_COUNT,
69 };
70 
/* Zone backing per-thread debug state (thread->machine.DebugData). */
extern zone_t ads_zone;
72 
/*
 * Kernel and user thread-state representations are identical on this
 * platform, so no translation is performed.
 */
kern_return_t
machine_thread_state_convert_to_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t *count)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}
83 
/*
 * Inverse of machine_thread_state_convert_to_user(); a no-op here for
 * the same reason: both representations are identical.
 */
kern_return_t
machine_thread_state_convert_from_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}
94 
/*
 * Signal ucontext pointers need no per-platform translation here;
 * the address is passed through unchanged.
 */
kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__unused thread_t thread,
	__unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}
103 
/*
 * User-supplied function pointers need no per-platform translation
 * (e.g. no pointer signing on this architecture); passed through as-is.
 */
kern_return_t
machine_thread_function_pointers_convert_from_user(
	__unused thread_t thread,
	__unused user_addr_t *fptrs,
	__unused uint32_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}
113 
114 /*
115  * Routine:	machine_thread_get_state
116  *
117  */
/*
 * Copy the requested flavor of machine state for `thread` into the
 * caller-supplied buffer `tstate`.  On entry *count is the buffer size
 * in 32-bit words; on success it is updated to the number of words
 * actually produced (with the exceptions noted in the VFP case below).
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
#define machine_thread_get_state_kprintf(x...)  /* kprintf("machine_thread_get
	                                         * _state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Enumerate the state flavors this platform supports. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* Extended flavor list that additionally reports pagein state. */
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		arm_unified_thread_state_t *unified_state;

		unsigned int    i;
		if (*count < ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/*
		 * A unified-sized buffer receives the 32-bit state embedded in
		 * an arm_unified_thread_state_t with its header filled in.
		 */
		if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
			unified_state = (arm_unified_thread_state_t *) tstate;
			state = &unified_state->ts_32;
			unified_state->ash.flavor = ARM_THREAD_STATE32;
			unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		} else {
			state = (struct arm_thread_state *) tstate;
		}
		saved_state = &thread->machine.PcbData;

		/* Copy out sp/lr/pc/cpsr and general registers r0-r12. */
		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++) {
			state->r[i] = saved_state->r[i];
		}
		machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp  0x%x\n",
		    state->pc, state->r[0], state->sp);

		/* For unified buffers the caller's count already matches. */
		if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
			*count = ARM_THREAD_STATE_COUNT;
		}
		break;
	}
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = &thread->machine.PcbData;

		/* Fault status/address captured at the last exception. */
		state->exception = saved_state->exception;
		state->fsr = saved_state->fsr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_VFP_STATE:{
#if     __ARM_VFP__
		struct arm_vfp_state *state;
		struct arm_vfpsaved_state *saved_state;
		unsigned int    i;
		unsigned int    max;

		/*
		 * Accept a legacy VFPv2-sized buffer (32 registers) as well
		 * as the full VFPv3-sized one (64 registers).
		 */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count =  ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		saved_state = find_user_vfp(thread);

		state->fpscr = saved_state->fpscr;
		for (i = 0; i < max; i++) {
			state->r[i] = saved_state->r[i];
		}

		/*
		 * NOTE(review): when *count > ARM_VFP_STATE_COUNT it is not
		 * clamped to ARM_VFP_STATE_COUNT here; and when __ARM_VFP__ is
		 * not built, *count is returned untouched -- verify callers
		 * always pass an exact count.
		 */
#endif
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_debug_state_t *state;
		arm_debug_state_t *thread_state;

		if (*count < ARM_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state_t *) tstate;
		thread_state = find_debug_state(thread);

		/* No per-thread debug state allocated means "all zero". */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state_t));
		}

		*count = ARM_DEBUG_STATE_COUNT;
		break;
	}

	case ARM_PAGEIN_STATE:{
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
279 
280 
281 /*
282  * Routine:	machine_thread_get_kern_state
283  *
284  */
/*
 * Return the ARM_THREAD_STATE of an interrupted kernel thread.  Valid
 * only for the current thread, and only while an interrupt frame is
 * live on this CPU (cpu_int_state != NULL).
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
#define machine_thread_get_kern_state_kprintf(x...)     /* kprintf("machine_threa
	                                                 * d_get_kern_state: "
	                                                 * x) */

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		unsigned int    i;
		if (*count < ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_thread_state *) tstate;
		saved_state = getCpuDatap()->cpu_int_state;

		/* Copy sp/lr/pc/cpsr and r0-r12 from the interrupt frame. */
		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++) {
			state->r[i] = saved_state->r[i];
		}
		machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp  0x%x\n",
		    state->pc, state->r[0], state->sp);
		*count = ARM_THREAD_STATE_COUNT;
		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
332 
/* Machine-dependent debug-register accessor, defined elsewhere. */
extern long long arm_debug_get(void);
334 
335 /*
336  * Routine:	machine_thread_set_state
337  *
338  */
339 kern_return_t
machine_thread_set_state(thread_t thread,thread_flavor_t flavor,thread_state_t tstate,mach_msg_type_number_t count)340 machine_thread_set_state(
341 	thread_t thread,
342 	thread_flavor_t flavor,
343 	thread_state_t tstate,
344 	mach_msg_type_number_t count)
345 {
346 #define machine_thread_set_state_kprintf(x...)  /* kprintf("machine_thread_set
347 	                                         * _state: " x) */
348 
349 	switch (flavor) {
350 	case ARM_THREAD_STATE:{
351 		struct arm_thread_state *state;
352 		struct arm_saved_state *saved_state;
353 		arm_unified_thread_state_t *unified_state;
354 		int             old_psr;
355 
356 		if (count < ARM_THREAD_STATE_COUNT) {
357 			return KERN_INVALID_ARGUMENT;
358 		}
359 
360 		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
361 			unified_state = (arm_unified_thread_state_t *) tstate;
362 			state = &unified_state->ts_32;
363 		} else {
364 			state = (struct arm_thread_state *) tstate;
365 		}
366 		saved_state = &thread->machine.PcbData;
367 		old_psr = saved_state->cpsr;
368 		memcpy((char *) saved_state, (char *) state, sizeof(*state));
369 		/*
370 		 * do not allow privileged bits of the PSR to be
371 		 * changed
372 		 */
373 		saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);
374 
375 		machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
376 		    state->pc, state->r[0], state->sp);
377 		break;
378 	}
379 	case ARM_VFP_STATE:{
380 #if __ARM_VFP__
381 		struct arm_vfp_state *state;
382 		struct arm_vfpsaved_state *saved_state;
383 		unsigned int    i;
384 		unsigned int    max;
385 
386 		if (count < ARM_VFP_STATE_COUNT) {
387 			if (count < ARM_VFPV2_STATE_COUNT) {
388 				return KERN_INVALID_ARGUMENT;
389 			} else {
390 				count =  ARM_VFPV2_STATE_COUNT;
391 			}
392 		}
393 
394 		if (count == ARM_VFPV2_STATE_COUNT) {
395 			max = 32;
396 		} else {
397 			max = 64;
398 		}
399 
400 		state = (struct arm_vfp_state *) tstate;
401 		saved_state = find_user_vfp(thread);
402 
403 		saved_state->fpscr = state->fpscr;
404 		for (i = 0; i < max; i++) {
405 			saved_state->r[i] = state->r[i];
406 		}
407 
408 #endif
409 		break;
410 	}
411 	case ARM_EXCEPTION_STATE:{
412 		if (count < ARM_EXCEPTION_STATE_COUNT) {
413 			return KERN_INVALID_ARGUMENT;
414 		}
415 
416 		break;
417 	}
418 	case ARM_DEBUG_STATE:{
419 		arm_debug_state_t *state;
420 		arm_debug_state_t *thread_state;
421 		boolean_t enabled = FALSE;
422 		unsigned int    i;
423 
424 		if (count < ARM_DEBUG_STATE_COUNT) {
425 			return KERN_INVALID_ARGUMENT;
426 		}
427 
428 		state = (arm_debug_state_t *) tstate;
429 		thread_state = find_debug_state(thread);
430 
431 		if (count < ARM_DEBUG_STATE_COUNT) {
432 			return KERN_INVALID_ARGUMENT;
433 		}
434 
435 		for (i = 0; i < 16; i++) {
436 			/* do not allow context IDs to be set */
437 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
438 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
439 			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
440 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
441 				return KERN_PROTECTION_FAILURE;
442 			}
443 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
444 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
445 				enabled = TRUE;
446 			}
447 		}
448 
449 		if (!enabled) {
450 			if (thread_state != NULL) {
451 				void *pTmp = thread->machine.DebugData;
452 				thread->machine.DebugData = NULL;
453 				zfree(ads_zone, pTmp);
454 			}
455 		} else {
456 			if (thread_state == NULL) {
457 				thread_state = zalloc(ads_zone);
458 			}
459 
460 			for (i = 0; i < 16; i++) {
461 				/* set appropriate priviledge; mask out unknown bits */
462 				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
463 				    | ARM_DBGBCR_MATCH_MASK
464 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
465 				    | ARM_DBG_CR_ENABLE_MASK))
466 				    | ARM_DBGBCR_TYPE_IVA
467 				    | ARM_DBG_CR_LINKED_UNLINKED
468 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
469 				    | ARM_DBG_CR_MODE_CONTROL_USER;
470 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
471 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
472 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
473 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
474 				    | ARM_DBG_CR_ENABLE_MASK))
475 				    | ARM_DBG_CR_LINKED_UNLINKED
476 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
477 				    | ARM_DBG_CR_MODE_CONTROL_USER;
478 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
479 			}
480 
481 			if (thread->machine.DebugData == NULL) {
482 				thread->machine.DebugData = thread_state;
483 			}
484 		}
485 
486 		if (thread == current_thread()) {
487 			arm_debug_set(thread_state);
488 		}
489 
490 		break;
491 	}
492 
493 	default:
494 		return KERN_INVALID_ARGUMENT;
495 	}
496 	return KERN_SUCCESS;
497 }
498 
499 mach_vm_address_t
machine_thread_pc(thread_t thread)500 machine_thread_pc(thread_t thread)
501 {
502 	struct arm_saved_state *ss = get_user_regs(thread);
503 	return (mach_vm_address_t)get_saved_state_pc(ss);
504 }
505 
506 void
machine_thread_reset_pc(thread_t thread,mach_vm_address_t pc)507 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
508 {
509 	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
510 }
511 
512 /*
513  * Routine:	machine_thread_state_initialize
514  *
515  */
516 void
machine_thread_state_initialize(thread_t thread)517 machine_thread_state_initialize(
518 	thread_t thread)
519 {
520 	struct arm_saved_state *savestate;
521 
522 	savestate = (struct arm_saved_state *) &thread->machine.PcbData;
523 	bzero((char *) savestate, sizeof(struct arm_saved_state));
524 	savestate->cpsr = PSR_USERDFLT;
525 
526 #if __ARM_VFP__
527 	vfp_state_initialize(&thread->machine.PcbData.VFPdata);
528 #endif
529 
530 	thread->machine.DebugData = NULL;
531 }
532 
#if __ARM_VFP__
/*
 * Reset a VFP save area to the platform default: all registers zeroed
 * and FPSCR set to FPSCR_DEFAULT.
 */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	/* Set default VFP state to RunFast mode:
	 *
	 * - flush-to-zero mode
	 * - default NaN mode
	 * - no enabled exceptions
	 *
	 * On the VFP11, this allows the use of floating point without
	 * trapping to support code, which we do not provide.  With
	 * the Cortex-A8, this allows the use of the (much faster) NFP
	 * pipeline for single-precision operations.
	 */

	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
553 
554 
555 /*
556  * Routine:	machine_thread_dup
557  *
558  */
559 kern_return_t
machine_thread_dup(thread_t self,thread_t target,__unused boolean_t is_corpse)560 machine_thread_dup(
561 	thread_t self,
562 	thread_t target,
563 	__unused boolean_t is_corpse)
564 {
565 	struct arm_saved_state *self_saved_state;
566 	struct arm_saved_state *target_saved_state;
567 
568 #if     __ARM_VFP__
569 	struct arm_vfpsaved_state *self_vfp_state;
570 	struct arm_vfpsaved_state *target_vfp_state;
571 #endif
572 
573 	target->machine.cthread_self = self->machine.cthread_self;
574 
575 	self_saved_state = &self->machine.PcbData;
576 	target_saved_state = &target->machine.PcbData;
577 	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
578 
579 #if     __ARM_VFP__
580 	self_vfp_state = &self->machine.PcbData.VFPdata;
581 	target_vfp_state = &target->machine.PcbData.VFPdata;
582 	bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
583 #endif
584 
585 	return KERN_SUCCESS;
586 }
587 
588 /*
589  * Routine:	get_user_regs
590  *
591  */
/*
 * Return the thread's saved user-mode register area, which lives
 * inline in the machine-dependent PCB.
 */
struct arm_saved_state *
get_user_regs(
	thread_t thread)
{
	return &thread->machine.PcbData;
}
598 
599 /*
600  * Routine:	find_user_regs
601  *
602  */
/* Alias for get_user_regs(), kept for historical callers. */
struct arm_saved_state *
find_user_regs(
	thread_t thread)
{
	return get_user_regs(thread);
}
609 
610 /*
611  * Routine:	find_kern_regs
612  *
613  */
614 struct arm_saved_state *
find_kern_regs(thread_t thread)615 find_kern_regs(
616 	thread_t thread)
617 {
618 	/*
619 	 * This works only for an interrupted kernel thread
620 	 */
621 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
622 		return (struct arm_saved_state *) NULL;
623 	} else {
624 		return getCpuDatap()->cpu_int_state;
625 	}
626 }
627 
#if __ARM_VFP__
/*
 *	Find the user state floating point context.  If there is no user state context,
 *	we just return a 0.
 */

struct arm_vfpsaved_state *
find_user_vfp(
	thread_t thread)
{
	/* VFP state lives inline in the PCB, so this never returns NULL here. */
	return &thread->machine.PcbData.VFPdata;
}
#endif /* __ARM_VFP__ */
641 
/*
 * Return the thread's per-thread hardware debug state, or NULL if none
 * has been allocated (see machine_thread_set_state, ARM_DEBUG_STATE).
 */
arm_debug_state_t *
find_debug_state(
	thread_t thread)
{
	return thread->machine.DebugData;
}
648 
649 /*
650  * Routine:	thread_userstack
651  *
652  */
653 kern_return_t
thread_userstack(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * user_stack,int * customstack,__unused boolean_t is64bit)654 thread_userstack(
655 	__unused thread_t thread,
656 	int flavor,
657 	thread_state_t tstate,
658 	unsigned int count,
659 	mach_vm_offset_t * user_stack,
660 	int *customstack,
661 	__unused boolean_t is64bit
662 	)
663 {
664 	switch (flavor) {
665 	case ARM_THREAD_STATE:
666 	{
667 		struct arm_thread_state *state;
668 
669 
670 		if (count < ARM_THREAD_STATE_COUNT) {
671 			return KERN_INVALID_ARGUMENT;
672 		}
673 
674 		if (customstack) {
675 			*customstack = 0;
676 		}
677 		state = (struct arm_thread_state *) tstate;
678 
679 		if (state->sp) {
680 			*user_stack = CAST_USER_ADDR_T(state->sp);
681 			if (customstack) {
682 				*customstack = 1;
683 			}
684 		} else {
685 			*user_stack = CAST_USER_ADDR_T(USRSTACK);
686 		}
687 	}
688 	break;
689 
690 	default:
691 		return KERN_INVALID_ARGUMENT;
692 	}
693 
694 	return KERN_SUCCESS;
695 }
696 
697 /*
698  * thread_userstackdefault:
699  *
700  * Return the default stack location for the
701  * thread, if otherwise unknown.
702  */
/* The default user stack location on this platform is USRSTACK. */
kern_return_t
thread_userstackdefault(
	mach_vm_offset_t *default_user_stack,
	boolean_t is64bit __unused)
{
	*default_user_stack = USRSTACK;

	return KERN_SUCCESS;
}
712 
713 /*
714  * Routine:	thread_setuserstack
715  *
716  */
717 void
thread_setuserstack(thread_t thread,mach_vm_address_t user_stack)718 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
719 {
720 	struct arm_saved_state *sv;
721 
722 #define thread_setuserstack_kprintf(x...)       /* kprintf("thread_setuserstac
723 	                                         * k: " x) */
724 
725 	sv = get_user_regs(thread);
726 
727 	sv->sp = user_stack;
728 
729 	thread_setuserstack_kprintf("stack %x\n", sv->sp);
730 
731 	return;
732 }
733 
734 /*
735  * Routine:	thread_adjuserstack
736  *
737  */
738 user_addr_t
thread_adjuserstack(thread_t thread,int adjust)739 thread_adjuserstack(thread_t thread, int adjust)
740 {
741 	struct arm_saved_state *sv;
742 
743 	sv = get_user_regs(thread);
744 
745 	sv->sp += adjust;
746 
747 	return sv->sp;
748 }
749 
750 /*
751  * Routine:	thread_setentrypoint
752  *
753  */
754 void
thread_setentrypoint(thread_t thread,mach_vm_offset_t entry)755 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
756 {
757 	struct arm_saved_state *sv;
758 
759 #define thread_setentrypoint_kprintf(x...)      /* kprintf("thread_setentrypoi
760 	                                         * nt: " x) */
761 
762 	sv = get_user_regs(thread);
763 
764 	sv->pc = entry;
765 
766 	thread_setentrypoint_kprintf("entry %x\n", sv->pc);
767 
768 	return;
769 }
770 
771 /*
772  * Routine:	thread_entrypoint
773  *
774  */
775 kern_return_t
thread_entrypoint(__unused thread_t thread,int flavor,thread_state_t tstate,__unused unsigned int count,mach_vm_offset_t * entry_point)776 thread_entrypoint(
777 	__unused thread_t thread,
778 	int flavor,
779 	thread_state_t tstate,
780 	__unused unsigned int count,
781 	mach_vm_offset_t * entry_point
782 	)
783 {
784 	switch (flavor) {
785 	case ARM_THREAD_STATE:
786 	{
787 		struct arm_thread_state *state;
788 
789 		if (count != ARM_THREAD_STATE_COUNT) {
790 			return KERN_INVALID_ARGUMENT;
791 		}
792 
793 		state = (struct arm_thread_state *) tstate;
794 
795 		/*
796 		 * If a valid entry point is specified, use it.
797 		 */
798 		if (state->pc) {
799 			*entry_point = CAST_USER_ADDR_T(state->pc);
800 		} else {
801 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
802 		}
803 	}
804 	break;
805 
806 	default:
807 		return KERN_INVALID_ARGUMENT;
808 	}
809 
810 	return KERN_SUCCESS;
811 }
812 
813 
814 /*
815  * Routine:	thread_set_child
816  *
817  */
818 void
thread_set_child(thread_t child,int pid)819 thread_set_child(
820 	thread_t child,
821 	int pid)
822 {
823 	struct arm_saved_state *child_state;
824 
825 	child_state = get_user_regs(child);
826 
827 	child_state->r[0] = (uint_t) pid;
828 	child_state->r[1] = 1ULL;
829 }
830 
831 
832 /*
833  * Routine:	thread_set_parent
834  *
835  */
836 void
thread_set_parent(thread_t parent,int pid)837 thread_set_parent(
838 	thread_t parent,
839 	int pid)
840 {
841 	struct arm_saved_state *parent_state;
842 
843 	parent_state = get_user_regs(parent);
844 
845 	parent_state->r[0] = pid;
846 	parent_state->r[1] = 0;
847 }
848 
849 
/*
 * Snapshot of a thread's machine context captured by act_thread_csave()
 * and restored/consumed by act_thread_catt() / act_thread_cfree().
 */
struct arm_act_context {
	struct arm_saved_state ss;
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;
#endif
};
856 
857 /*
858  * Routine:	act_thread_csave
859  *
860  */
/*
 * Capture the current thread's integer (and, when built, VFP) state
 * into a freshly allocated arm_act_context.  Returns NULL on failure.
 * Ownership passes to the caller; release via act_thread_catt() or
 * act_thread_cfree().
 */
void           *
act_thread_csave(void)
{
	struct arm_act_context *ic;
	kern_return_t   kret;
	unsigned int    val;

	/* Z_NOFAIL: allocation cannot return NULL. */
	ic = kalloc_type(struct arm_act_context, Z_WAITOK | Z_NOFAIL);

	val = ARM_THREAD_STATE_COUNT;
	kret = machine_thread_get_state(current_thread(),
	    ARM_THREAD_STATE,
	    (thread_state_t) &ic->ss,
	    &val);
	if (kret != KERN_SUCCESS) {
		act_thread_cfree(ic);
		return (void *) 0;
	}
#if __ARM_VFP__
	val = ARM_VFP_STATE_COUNT;
	kret = machine_thread_get_state(current_thread(),
	    ARM_VFP_STATE,
	    (thread_state_t) &ic->vfps,
	    &val);
	if (kret != KERN_SUCCESS) {
		act_thread_cfree(ic);
		return (void *) 0;
	}
#endif
	return ic;
}
892 
893 /*
894  * Routine:	act_thread_catt
895  *
896  */
897 void
act_thread_catt(void * ctx)898 act_thread_catt(void *ctx)
899 {
900 	struct arm_act_context *ic;
901 	kern_return_t   kret;
902 
903 	ic = (struct arm_act_context *) ctx;
904 
905 	if (ic == (struct arm_act_context *) NULL) {
906 		return;
907 	}
908 
909 	kret = machine_thread_set_state(current_thread(),
910 	    ARM_THREAD_STATE,
911 	    (thread_state_t) &ic->ss,
912 	    ARM_THREAD_STATE_COUNT);
913 	if (kret != KERN_SUCCESS) {
914 		goto out;
915 	}
916 
917 #if __ARM_VFP__
918 	kret = machine_thread_set_state(current_thread(),
919 	    ARM_VFP_STATE,
920 	    (thread_state_t) &ic->vfps,
921 	    ARM_VFP_STATE_COUNT);
922 	if (kret != KERN_SUCCESS) {
923 		goto out;
924 	}
925 #endif
926 out:
927 	act_thread_cfree(ic);
928 }
929 
930 /*
 * Routine:	act_thread_cfree
932  *
933  */
/* Release a context snapshot allocated by act_thread_csave(). */
void
act_thread_cfree(void *ctx)
{
	kfree_type(struct arm_act_context, ctx);
}
939 
/*
 * Initialize a workqueue thread's register state from a 32-bit thread
 * state.  The thread lock is taken (at splsched) only when mutating a
 * thread other than the caller.
 */
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = &thread->machine.PcbData;
	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	/* Force the default user PSR regardless of what the caller passed. */
	saved_state->cpsr = PSR_USERDFLT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
971