xref: /xnu-8020.140.41/osfmk/arm/status.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #include <debug.h>
29 #include <mach/mach_types.h>
30 #include <mach/kern_return.h>
31 #include <mach/thread_status.h>
32 #include <kern/thread.h>
33 #include <kern/kalloc.h>
34 #include <arm/vmparam.h>
35 #include <arm/cpu_data_internal.h>
36 #include <arm/proc_reg.h>
37 
/*
 * Legacy VFPv2 register file layout: 32 single-precision registers
 * plus the FPSCR.  Used only to size/accept the shorter VFP flavor in
 * machine_thread_{get,set}_state (newer callers pass 64 registers).
 */
struct arm_vfpv2_state {
	__uint32_t        __r[32];
	__uint32_t        __fpscr;
};

typedef struct arm_vfpv2_state  arm_vfpv2_state_t;

/* Size of arm_vfpv2_state_t expressed in 32-bit words. */
#define ARM_VFPV2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfpv2_state_t)/sizeof(uint32_t)))
48 
/*
 * Forward definitions
 * (defined at the bottom of this file; used by fork/vfork return paths)
 */
void
thread_set_child(thread_t child, int pid);

void
thread_set_parent(thread_t parent, int pid);
57 
/*
 * Maps state flavor to number of words in the state:
 * indexed by the ARM_*_STATE flavor constants; slot 0 (FLAVOR_LIST)
 * has no fixed size.
 */
/* __private_extern__ */
unsigned int    _MachineStateCount[] = {
	/* FLAVOR_LIST */ 0,
	[ARM_THREAD_STATE]    = ARM_THREAD_STATE_COUNT,
	[ARM_VFP_STATE]       = ARM_VFP_STATE_COUNT,
	[ARM_EXCEPTION_STATE] = ARM_EXCEPTION_STATE_COUNT,
	[ARM_DEBUG_STATE]     = ARM_DEBUG_STATE_COUNT,
	[ARM_PAGEIN_STATE]    = ARM_PAGEIN_STATE_COUNT,
};
70 
71 extern zone_t ads_zone;
72 
/*
 * Kernel and user thread-state representations are identical on this
 * platform, so no conversion is performed; always succeeds.
 */
kern_return_t
machine_thread_state_convert_to_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t *count,
	__unused thread_set_status_flags_t tssf_flags)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}
84 
/*
 * Inverse of machine_thread_state_convert_to_user; likewise a no-op
 * on this platform.
 */
kern_return_t
machine_thread_state_convert_from_user(
	__unused thread_t thread,
	__unused thread_flavor_t flavor,
	__unused thread_state_t tstate,
	__unused mach_msg_type_number_t count,
	__unused thread_state_t old_tstate,
	__unused mach_msg_type_number_t old_count,
	__unused thread_set_status_flags_t tssf_flags)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}
98 
/*
 * Signal ucontext pointers need no translation on this platform.
 */
kern_return_t
machine_thread_siguctx_pointer_convert_to_user(
	__unused thread_t thread,
	__unused user_addr_t *uctxp)
{
	// No conversion to userspace representation on this platform
	return KERN_SUCCESS;
}
107 
/*
 * User function pointers need no translation (e.g. no pointer signing)
 * on this platform.
 */
kern_return_t
machine_thread_function_pointers_convert_from_user(
	__unused thread_t thread,
	__unused user_addr_t *fptrs,
	__unused uint32_t count)
{
	// No conversion from userspace representation on this platform
	return KERN_SUCCESS;
}
117 
/*
 * Routine:	machine_thread_get_state
 *
 * Copy a thread's saved machine state, in the requested flavor, into
 * the caller-supplied buffer.  On entry *count is the buffer capacity
 * in 32-bit words; on success it is set to the number of words filled.
 */
kern_return_t
machine_thread_get_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
#define machine_thread_get_state_kprintf(x...)  /* kprintf("machine_thread_get
	                                         * _state: " x) */

	switch (flavor) {
	case THREAD_STATE_FLAVOR_LIST:
		/* Enumerate the state flavors supported on this platform. */
		if (*count < 4) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		*count = 4;
		break;

	case THREAD_STATE_FLAVOR_LIST_10_15:
		/* Extended flavor list which also reports ARM_PAGEIN_STATE. */
		if (*count < 5) {
			return KERN_INVALID_ARGUMENT;
		}

		tstate[0] = ARM_THREAD_STATE;
		tstate[1] = ARM_VFP_STATE;
		tstate[2] = ARM_EXCEPTION_STATE;
		tstate[3] = ARM_DEBUG_STATE;
		tstate[4] = ARM_PAGEIN_STATE;
		*count = 5;
		break;

	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		arm_unified_thread_state_t *unified_state;

		unsigned int    i;
		if (*count < ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		/*
		 * A unified count means the caller wants the 32-bit state
		 * wrapped in an arm_unified_thread_state_t header.
		 */
		if (*count == ARM_UNIFIED_THREAD_STATE_COUNT) {
			unified_state = (arm_unified_thread_state_t *) tstate;
			state = &unified_state->ts_32;
			unified_state->ash.flavor = ARM_THREAD_STATE32;
			unified_state->ash.count = ARM_THREAD_STATE32_COUNT;
		} else {
			state = (struct arm_thread_state *) tstate;
		}
		saved_state = &thread->machine.PcbData;

		/* r0-r12 plus sp/lr/pc/cpsr from the saved user context. */
		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++) {
			state->r[i] = saved_state->r[i];
		}
		machine_thread_get_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp  0x%x\n",
		    state->pc, state->r[0], state->sp);

		if (*count != ARM_UNIFIED_THREAD_STATE_COUNT) {
			*count = ARM_THREAD_STATE_COUNT;
		}
		break;
	}
	case ARM_EXCEPTION_STATE:{
		struct arm_exception_state *state;
		struct arm_saved_state *saved_state;

		if (*count < ARM_EXCEPTION_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_exception_state *) tstate;
		saved_state = &thread->machine.PcbData;

		state->exception = saved_state->exception;
		state->fsr = saved_state->fsr;
		state->far = saved_state->far;

		*count = ARM_EXCEPTION_STATE_COUNT;
		break;
	}
	case ARM_VFP_STATE:{
#if     __ARM_VFP__
		struct arm_vfp_state *state;
		struct arm_vfpsaved_state *saved_state;
		unsigned int    i;
		unsigned int    max;

		/* Also honor the shorter legacy VFPv2 layout (32 registers). */
		if (*count < ARM_VFP_STATE_COUNT) {
			if (*count < ARM_VFPV2_STATE_COUNT) {
				return KERN_INVALID_ARGUMENT;
			} else {
				*count =  ARM_VFPV2_STATE_COUNT;
			}
		}

		if (*count == ARM_VFPV2_STATE_COUNT) {
			max = 32;
		} else {
			max = 64;
		}

		state = (struct arm_vfp_state *) tstate;
		saved_state = find_user_vfp(thread);

		state->fpscr = saved_state->fpscr;
		for (i = 0; i < max; i++) {
			state->r[i] = saved_state->r[i];
		}

#endif
		break;
	}
	case ARM_DEBUG_STATE:{
		arm_debug_state_t *state;
		arm_debug_state_t *thread_state;

		if (*count < ARM_DEBUG_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_debug_state_t *) tstate;
		thread_state = find_debug_state(thread);

		/* A thread with no debug state reads back as all zeroes. */
		if (thread_state == NULL) {
			bzero(state, sizeof(arm_debug_state_t));
		} else {
			bcopy(thread_state, state, sizeof(arm_debug_state_t));
		}

		*count = ARM_DEBUG_STATE_COUNT;
		break;
	}

	case ARM_PAGEIN_STATE:{
		arm_pagein_state_t *state;

		if (*count < ARM_PAGEIN_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (arm_pagein_state_t *)tstate;
		state->__pagein_error = thread->t_pagein_error;

		*count = ARM_PAGEIN_STATE_COUNT;
		break;
	}

	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
283 
284 
/*
 * Routine:	machine_thread_get_kern_state
 *
 * Return the kernel register state saved at interrupt entry.  Valid
 * only when called on the current thread while it is servicing an
 * interrupt (cpu_int_state is non-NULL).
 */
kern_return_t
machine_thread_get_kern_state(
	thread_t thread,
	thread_flavor_t flavor,
	thread_state_t tstate,
	mach_msg_type_number_t * count)
{
#define machine_thread_get_kern_state_kprintf(x...)     /* kprintf("machine_threa
	                                                 * d_get_kern_state: "
	                                                 * x) */

	/*
	 * This works only for an interrupted kernel thread
	 */
	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
		return KERN_FAILURE;
	}

	switch (flavor) {
	case ARM_THREAD_STATE:{
		struct arm_thread_state *state;
		struct arm_saved_state *saved_state;
		unsigned int    i;
		if (*count < ARM_THREAD_STATE_COUNT) {
			return KERN_INVALID_ARGUMENT;
		}

		state = (struct arm_thread_state *) tstate;
		saved_state = getCpuDatap()->cpu_int_state;

		/* Copy out the context captured at interrupt entry. */
		state->sp = saved_state->sp;
		state->lr = saved_state->lr;
		state->pc = saved_state->pc;
		state->cpsr = saved_state->cpsr;
		for (i = 0; i < 13; i++) {
			state->r[i] = saved_state->r[i];
		}
		machine_thread_get_kern_state_kprintf("machine_thread_get_state: pc 0x%x r0 0x%x sp  0x%x\n",
		    state->pc, state->r[0], state->sp);
		*count = ARM_THREAD_STATE_COUNT;
		break;
	}
	default:
		return KERN_INVALID_ARGUMENT;
	}
	return KERN_SUCCESS;
}
336 
337 extern long long arm_debug_get(void);
338 
339 /*
340  * Routine:	machine_thread_set_state
341  *
342  */
343 kern_return_t
machine_thread_set_state(thread_t thread,thread_flavor_t flavor,thread_state_t tstate,mach_msg_type_number_t count)344 machine_thread_set_state(
345 	thread_t thread,
346 	thread_flavor_t flavor,
347 	thread_state_t tstate,
348 	mach_msg_type_number_t count)
349 {
350 #define machine_thread_set_state_kprintf(x...)  /* kprintf("machine_thread_set
351 	                                         * _state: " x) */
352 
353 	switch (flavor) {
354 	case ARM_THREAD_STATE:{
355 		struct arm_thread_state *state;
356 		struct arm_saved_state *saved_state;
357 		arm_unified_thread_state_t *unified_state;
358 		int             old_psr;
359 
360 		if (count < ARM_THREAD_STATE_COUNT) {
361 			return KERN_INVALID_ARGUMENT;
362 		}
363 
364 		if (count == ARM_UNIFIED_THREAD_STATE_COUNT) {
365 			unified_state = (arm_unified_thread_state_t *) tstate;
366 			state = &unified_state->ts_32;
367 		} else {
368 			state = (struct arm_thread_state *) tstate;
369 		}
370 		saved_state = &thread->machine.PcbData;
371 		old_psr = saved_state->cpsr;
372 		memcpy((char *) saved_state, (char *) state, sizeof(*state));
373 		/*
374 		 * do not allow privileged bits of the PSR to be
375 		 * changed
376 		 */
377 		saved_state->cpsr = (saved_state->cpsr & ~PSR_USER_MASK) | (old_psr & PSR_USER_MASK);
378 
379 		machine_thread_set_state_kprintf("machine_thread_set_state: pc 0x%x r0 0x%x sp 0x%x\n",
380 		    state->pc, state->r[0], state->sp);
381 		break;
382 	}
383 	case ARM_VFP_STATE:{
384 #if __ARM_VFP__
385 		struct arm_vfp_state *state;
386 		struct arm_vfpsaved_state *saved_state;
387 		unsigned int    i;
388 		unsigned int    max;
389 
390 		if (count < ARM_VFP_STATE_COUNT) {
391 			if (count < ARM_VFPV2_STATE_COUNT) {
392 				return KERN_INVALID_ARGUMENT;
393 			} else {
394 				count =  ARM_VFPV2_STATE_COUNT;
395 			}
396 		}
397 
398 		if (count == ARM_VFPV2_STATE_COUNT) {
399 			max = 32;
400 		} else {
401 			max = 64;
402 		}
403 
404 		state = (struct arm_vfp_state *) tstate;
405 		saved_state = find_user_vfp(thread);
406 
407 		saved_state->fpscr = state->fpscr;
408 		for (i = 0; i < max; i++) {
409 			saved_state->r[i] = state->r[i];
410 		}
411 
412 #endif
413 		break;
414 	}
415 	case ARM_EXCEPTION_STATE:{
416 		if (count < ARM_EXCEPTION_STATE_COUNT) {
417 			return KERN_INVALID_ARGUMENT;
418 		}
419 
420 		break;
421 	}
422 	case ARM_DEBUG_STATE:{
423 		arm_debug_state_t *state;
424 		arm_debug_state_t *thread_state;
425 		boolean_t enabled = FALSE;
426 		unsigned int    i;
427 
428 		if (count < ARM_DEBUG_STATE_COUNT) {
429 			return KERN_INVALID_ARGUMENT;
430 		}
431 
432 		state = (arm_debug_state_t *) tstate;
433 		thread_state = find_debug_state(thread);
434 
435 		if (count < ARM_DEBUG_STATE_COUNT) {
436 			return KERN_INVALID_ARGUMENT;
437 		}
438 
439 		for (i = 0; i < 16; i++) {
440 			/* do not allow context IDs to be set */
441 			if (((state->bcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
442 			    || ((state->bcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)
443 			    || ((state->wcr[i] & ARM_DBGBCR_TYPE_MASK) != ARM_DBGBCR_TYPE_IVA)
444 			    || ((state->wcr[i] & ARM_DBG_CR_LINKED_MASK) != ARM_DBG_CR_LINKED_UNLINKED)) {
445 				return KERN_PROTECTION_FAILURE;
446 			}
447 			if ((((state->bcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE))
448 			    || ((state->wcr[i] & ARM_DBG_CR_ENABLE_MASK) == ARM_DBG_CR_ENABLE_ENABLE)) {
449 				enabled = TRUE;
450 			}
451 		}
452 
453 		if (!enabled) {
454 			if (thread_state != NULL) {
455 				void *pTmp = thread->machine.DebugData;
456 				thread->machine.DebugData = NULL;
457 				zfree(ads_zone, pTmp);
458 			}
459 		} else {
460 			if (thread_state == NULL) {
461 				thread_state = zalloc(ads_zone);
462 			}
463 
464 			for (i = 0; i < 16; i++) {
465 				/* set appropriate priviledge; mask out unknown bits */
466 				thread_state->bcr[i] = (state->bcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
467 				    | ARM_DBGBCR_MATCH_MASK
468 				    | ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK
469 				    | ARM_DBG_CR_ENABLE_MASK))
470 				    | ARM_DBGBCR_TYPE_IVA
471 				    | ARM_DBG_CR_LINKED_UNLINKED
472 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
473 				    | ARM_DBG_CR_MODE_CONTROL_USER;
474 				thread_state->bvr[i] = state->bvr[i] & ARM_DBG_VR_ADDRESS_MASK;
475 				thread_state->wcr[i] = (state->wcr[i] & (ARM_DBG_CR_ADDRESS_MASK_MASK
476 				    | ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK
477 				    | ARM_DBGWCR_ACCESS_CONTROL_MASK
478 				    | ARM_DBG_CR_ENABLE_MASK))
479 				    | ARM_DBG_CR_LINKED_UNLINKED
480 				    | ARM_DBG_CR_SECURITY_STATE_BOTH
481 				    | ARM_DBG_CR_MODE_CONTROL_USER;
482 				thread_state->wvr[i] = state->wvr[i] & ARM_DBG_VR_ADDRESS_MASK;
483 			}
484 
485 			if (thread->machine.DebugData == NULL) {
486 				thread->machine.DebugData = thread_state;
487 			}
488 		}
489 
490 		if (thread == current_thread()) {
491 			arm_debug_set(thread_state);
492 		}
493 
494 		break;
495 	}
496 
497 	default:
498 		return KERN_INVALID_ARGUMENT;
499 	}
500 	return KERN_SUCCESS;
501 }
502 
503 mach_vm_address_t
machine_thread_pc(thread_t thread)504 machine_thread_pc(thread_t thread)
505 {
506 	struct arm_saved_state *ss = get_user_regs(thread);
507 	return (mach_vm_address_t)get_saved_state_pc(ss);
508 }
509 
510 void
machine_thread_reset_pc(thread_t thread,mach_vm_address_t pc)511 machine_thread_reset_pc(thread_t thread, mach_vm_address_t pc)
512 {
513 	set_saved_state_pc(get_user_regs(thread), (register_t)pc);
514 }
515 
516 /*
517  * Routine:	machine_thread_state_initialize
518  *
519  */
520 void
machine_thread_state_initialize(thread_t thread)521 machine_thread_state_initialize(
522 	thread_t thread)
523 {
524 	struct arm_saved_state *savestate;
525 
526 	savestate = (struct arm_saved_state *) &thread->machine.PcbData;
527 	bzero((char *) savestate, sizeof(struct arm_saved_state));
528 	savestate->cpsr = PSR_USERDFLT;
529 
530 #if __ARM_VFP__
531 	vfp_state_initialize(&thread->machine.PcbData.VFPdata);
532 #endif
533 
534 	thread->machine.DebugData = NULL;
535 }
536 
#if __ARM_VFP__
/*
 * Reset a VFP register save area to the platform default (FPSCR set to
 * FPSCR_DEFAULT, all data registers zero).
 */
void
vfp_state_initialize(struct arm_vfpsaved_state *vfp_state)
{
	/* Set default VFP state to RunFast mode:
	 *
	 * - flush-to-zero mode
	 * - default NaN mode
	 * - no enabled exceptions
	 *
	 * On the VFP11, this allows the use of floating point without
	 * trapping to support code, which we do not provide.  With
	 * the Cortex-A8, this allows the use of the (much faster) NFP
	 * pipeline for single-precision operations.
	 */

	bzero(vfp_state, sizeof(*vfp_state));
	vfp_state->fpscr = FPSCR_DEFAULT;
}
#endif /* __ARM_VFP__ */
557 
558 
559 /*
560  * Routine:	machine_thread_dup
561  *
562  */
563 kern_return_t
machine_thread_dup(thread_t self,thread_t target,__unused boolean_t is_corpse)564 machine_thread_dup(
565 	thread_t self,
566 	thread_t target,
567 	__unused boolean_t is_corpse)
568 {
569 	struct arm_saved_state *self_saved_state;
570 	struct arm_saved_state *target_saved_state;
571 
572 #if     __ARM_VFP__
573 	struct arm_vfpsaved_state *self_vfp_state;
574 	struct arm_vfpsaved_state *target_vfp_state;
575 #endif
576 
577 	target->machine.cthread_self = self->machine.cthread_self;
578 
579 	self_saved_state = &self->machine.PcbData;
580 	target_saved_state = &target->machine.PcbData;
581 	bcopy(self_saved_state, target_saved_state, sizeof(struct arm_saved_state));
582 
583 #if     __ARM_VFP__
584 	self_vfp_state = &self->machine.PcbData.VFPdata;
585 	target_vfp_state = &target->machine.PcbData.VFPdata;
586 	bcopy(self_vfp_state, target_vfp_state, sizeof(struct arm_vfpsaved_state));
587 #endif
588 
589 	return KERN_SUCCESS;
590 }
591 
/*
 * Routine:	get_user_regs
 *
 * Return a pointer to the thread's saved user register context (the
 * PCB embedded in the thread structure); never NULL.
 */
struct arm_saved_state *
get_user_regs(
	thread_t thread)
{
	return &thread->machine.PcbData;
}
602 
/*
 * Routine:	find_user_regs
 *
 * Alias for get_user_regs(), kept for historical callers.
 */
struct arm_saved_state *
find_user_regs(
	thread_t thread)
{
	return get_user_regs(thread);
}
613 
614 /*
615  * Routine:	find_kern_regs
616  *
617  */
618 struct arm_saved_state *
find_kern_regs(thread_t thread)619 find_kern_regs(
620 	thread_t thread)
621 {
622 	/*
623 	 * This works only for an interrupted kernel thread
624 	 */
625 	if (thread != current_thread() || getCpuDatap()->cpu_int_state == NULL) {
626 		return (struct arm_saved_state *) NULL;
627 	} else {
628 		return getCpuDatap()->cpu_int_state;
629 	}
630 }
631 
#if __ARM_VFP__
/*
 *	Find the user state floating point context.  If there is no user state context,
 *	we just return a 0.
 *
 *	NOTE(review): in this configuration the VFP state is embedded in
 *	the PCB, so this never actually returns NULL — confirm whether the
 *	"return 0" contract still matters to any caller.
 */

struct arm_vfpsaved_state *
find_user_vfp(
	thread_t thread)
{
	return &thread->machine.PcbData.VFPdata;
}
#endif /* __ARM_VFP__ */
645 
/* Return the thread's debug state, or NULL if none has been allocated. */
arm_debug_state_t *
find_debug_state(
	thread_t thread)
{
	return thread->machine.DebugData;
}
652 
653 /*
654  * Routine:	thread_userstack
655  *
656  */
657 kern_return_t
thread_userstack(__unused thread_t thread,int flavor,thread_state_t tstate,unsigned int count,mach_vm_offset_t * user_stack,int * customstack,__unused boolean_t is64bit)658 thread_userstack(
659 	__unused thread_t thread,
660 	int flavor,
661 	thread_state_t tstate,
662 	unsigned int count,
663 	mach_vm_offset_t * user_stack,
664 	int *customstack,
665 	__unused boolean_t is64bit
666 	)
667 {
668 	switch (flavor) {
669 	case ARM_THREAD_STATE:
670 	{
671 		struct arm_thread_state *state;
672 
673 
674 		if (count < ARM_THREAD_STATE_COUNT) {
675 			return KERN_INVALID_ARGUMENT;
676 		}
677 
678 		if (customstack) {
679 			*customstack = 0;
680 		}
681 		state = (struct arm_thread_state *) tstate;
682 
683 		if (state->sp) {
684 			*user_stack = CAST_USER_ADDR_T(state->sp);
685 			if (customstack) {
686 				*customstack = 1;
687 			}
688 		} else {
689 			*user_stack = CAST_USER_ADDR_T(USRSTACK);
690 		}
691 	}
692 	break;
693 
694 	default:
695 		return KERN_INVALID_ARGUMENT;
696 	}
697 
698 	return KERN_SUCCESS;
699 }
700 
/*
 * thread_userstackdefault:
 *
 * Return the default stack location for the
 * thread, if otherwise unknown.  Always USRSTACK on this platform.
 */
kern_return_t
thread_userstackdefault(
	mach_vm_offset_t *default_user_stack,
	boolean_t is64bit __unused)
{
	*default_user_stack = USRSTACK;

	return KERN_SUCCESS;
}
716 
717 /*
718  * Routine:	thread_setuserstack
719  *
720  */
721 void
thread_setuserstack(thread_t thread,mach_vm_address_t user_stack)722 thread_setuserstack(thread_t thread, mach_vm_address_t user_stack)
723 {
724 	struct arm_saved_state *sv;
725 
726 #define thread_setuserstack_kprintf(x...)       /* kprintf("thread_setuserstac
727 	                                         * k: " x) */
728 
729 	sv = get_user_regs(thread);
730 
731 	sv->sp = user_stack;
732 
733 	thread_setuserstack_kprintf("stack %x\n", sv->sp);
734 
735 	return;
736 }
737 
738 /*
739  * Routine:	thread_adjuserstack
740  *
741  */
742 user_addr_t
thread_adjuserstack(thread_t thread,int adjust)743 thread_adjuserstack(thread_t thread, int adjust)
744 {
745 	struct arm_saved_state *sv;
746 
747 	sv = get_user_regs(thread);
748 
749 	sv->sp += adjust;
750 
751 	return sv->sp;
752 }
753 
754 /*
755  * Routine:	thread_setentrypoint
756  *
757  */
758 void
thread_setentrypoint(thread_t thread,mach_vm_offset_t entry)759 thread_setentrypoint(thread_t thread, mach_vm_offset_t entry)
760 {
761 	struct arm_saved_state *sv;
762 
763 #define thread_setentrypoint_kprintf(x...)      /* kprintf("thread_setentrypoi
764 	                                         * nt: " x) */
765 
766 	sv = get_user_regs(thread);
767 
768 	sv->pc = entry;
769 
770 	thread_setentrypoint_kprintf("entry %x\n", sv->pc);
771 
772 	return;
773 }
774 
775 /*
776  * Routine:	thread_entrypoint
777  *
778  */
779 kern_return_t
thread_entrypoint(__unused thread_t thread,int flavor,thread_state_t tstate,__unused unsigned int count,mach_vm_offset_t * entry_point)780 thread_entrypoint(
781 	__unused thread_t thread,
782 	int flavor,
783 	thread_state_t tstate,
784 	__unused unsigned int count,
785 	mach_vm_offset_t * entry_point
786 	)
787 {
788 	switch (flavor) {
789 	case ARM_THREAD_STATE:
790 	{
791 		struct arm_thread_state *state;
792 
793 		if (count != ARM_THREAD_STATE_COUNT) {
794 			return KERN_INVALID_ARGUMENT;
795 		}
796 
797 		state = (struct arm_thread_state *) tstate;
798 
799 		/*
800 		 * If a valid entry point is specified, use it.
801 		 */
802 		if (state->pc) {
803 			*entry_point = CAST_USER_ADDR_T(state->pc);
804 		} else {
805 			*entry_point = CAST_USER_ADDR_T(VM_MIN_ADDRESS);
806 		}
807 	}
808 	break;
809 
810 	default:
811 		return KERN_INVALID_ARGUMENT;
812 	}
813 
814 	return KERN_SUCCESS;
815 }
816 
817 
818 /*
819  * Routine:	thread_set_child
820  *
821  */
822 void
thread_set_child(thread_t child,int pid)823 thread_set_child(
824 	thread_t child,
825 	int pid)
826 {
827 	struct arm_saved_state *child_state;
828 
829 	child_state = get_user_regs(child);
830 
831 	child_state->r[0] = (uint_t) pid;
832 	child_state->r[1] = 1ULL;
833 }
834 
835 
836 /*
837  * Routine:	thread_set_parent
838  *
839  */
840 void
thread_set_parent(thread_t parent,int pid)841 thread_set_parent(
842 	thread_t parent,
843 	int pid)
844 {
845 	struct arm_saved_state *parent_state;
846 
847 	parent_state = get_user_regs(parent);
848 
849 	parent_state->r[0] = pid;
850 	parent_state->r[1] = 0;
851 }
852 
853 
/* Thread context snapshot captured by act_thread_csave(). */
struct arm_act_context {
	struct arm_saved_state ss;        /* integer register state */
#if __ARM_VFP__
	struct arm_vfpsaved_state vfps;   /* VFP register state */
#endif
};
860 
861 /*
862  * Routine:	act_thread_csave
863  *
864  */
865 void           *
act_thread_csave(void)866 act_thread_csave(void)
867 {
868 	struct arm_act_context *ic;
869 	kern_return_t   kret;
870 	unsigned int    val;
871 
872 	ic = kalloc_type(struct arm_act_context, Z_WAITOK | Z_NOFAIL);
873 
874 	val = ARM_THREAD_STATE_COUNT;
875 	kret = machine_thread_get_state(current_thread(),
876 	    ARM_THREAD_STATE,
877 	    (thread_state_t) &ic->ss,
878 	    &val);
879 	if (kret != KERN_SUCCESS) {
880 		act_thread_cfree(ic);
881 		return (void *) 0;
882 	}
883 #if __ARM_VFP__
884 	val = ARM_VFP_STATE_COUNT;
885 	kret = machine_thread_get_state(current_thread(),
886 	    ARM_VFP_STATE,
887 	    (thread_state_t) &ic->vfps,
888 	    &val);
889 	if (kret != KERN_SUCCESS) {
890 		act_thread_cfree(ic);
891 		return (void *) 0;
892 	}
893 #endif
894 	return ic;
895 }
896 
/*
 * Routine:	act_thread_catt
 *
 * Reattach a context previously captured by act_thread_csave() to the
 * current thread, then free it.  A NULL context is ignored; the
 * context is freed even if restoring the state fails part-way.
 */
void
act_thread_catt(void *ctx)
{
	struct arm_act_context *ic;
	kern_return_t   kret;

	ic = (struct arm_act_context *) ctx;

	if (ic == (struct arm_act_context *) NULL) {
		return;
	}

	kret = machine_thread_set_state(current_thread(),
	    ARM_THREAD_STATE,
	    (thread_state_t) &ic->ss,
	    ARM_THREAD_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}

#if __ARM_VFP__
	kret = machine_thread_set_state(current_thread(),
	    ARM_VFP_STATE,
	    (thread_state_t) &ic->vfps,
	    ARM_VFP_STATE_COUNT);
	if (kret != KERN_SUCCESS) {
		goto out;
	}
#endif
out:
	/* Always release the context, success or not. */
	act_thread_cfree(ic);
}
933 
/*
 * Routine:	act_thread_cfree
 *
 * Release a context allocated by act_thread_csave().
 * (Header previously mislabeled this routine as act_thread_catt.)
 */
void
act_thread_cfree(void *ctx)
{
	kfree_type(struct arm_act_context, ctx);
}
943 
/*
 * Initialize a workqueue thread's user register state from a 32-bit
 * thread state.  Takes the thread lock (at splsched) only when the
 * target is not the calling thread, since another CPU may be looking
 * at the saved state concurrently.
 */
kern_return_t
thread_set_wq_state32(thread_t thread, thread_state_t tstate)
{
	arm_thread_state_t *state;
	struct arm_saved_state *saved_state;
	thread_t curth = current_thread();
	spl_t s = 0;

	saved_state = &thread->machine.PcbData;
	state = (arm_thread_state_t *)tstate;

	if (curth != thread) {
		s = splsched();
		thread_lock(thread);
	}

	/*
	 * do not zero saved_state, it can be concurrently accessed
	 * and zero is not a valid state for some of the registers,
	 * like sp.
	 */
	thread_state32_to_saved_state(state, saved_state);
	saved_state->cpsr = PSR_USERDFLT;

	if (curth != thread) {
		thread_unlock(thread);
		splx(s);
	}

	return KERN_SUCCESS;
}
975