/* xref: /xnu-8019.80.24/osfmk/arm/locore.s (revision a325d9c4a84054e40bbe985afedcb50ab80993ea) */
/*
 * Copyright (c) 2007-2011 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
56
#include <machine/asm.h>
#include <arm/proc_reg.h>
#include <pexpert/arm/board_config.h>
#include <mach/arm/traps.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <mach_assert.h>
#include <config_dtrace.h>
#include "assym.s"			// generated structure/field offsets (ACT_*, SS_*, TH_*, CPU_*)
#include "dwarf_unwind.h"		// UNWIND_PROLOGUE / UNWIND_DIRECTIVES / UNWIND_EPILOGUE macros

// Set to 1 to call syscall_trace()/syscall_trace_exit() around each syscall (debug aid).
#define TRACE_SYSCALL 0
69
/*
 * Low (physical-memory) exception vectors and reset path.
 *
 * Copied to low physical memory in arm_init,
 * so the kernel must be linked virtually at
 * 0xc0001000 or higher to leave space for it.
 *
 * Only the reset vector is live here; all other entries spin, since the
 * real vectors (ExceptionVectorsBase below) are installed once the MMU
 * is up.  Layout is position-critical: Lreset_low_vector must sit at
 * ExceptionLowVectorsBase + 0x200 (8 vector words + 480 bytes of
 * ResetPrivateData), and `adr pc, ...` encodes a PC-relative offset.
 */
	.syntax unified
	.text
	.align 12						// 4KB-aligned so the block can be mapped/copied as a page
	.globl EXT(ExceptionLowVectorsBase)

LEXT(ExceptionLowVectorsBase)
	adr	pc, Lreset_low_vector
	b	.	// Undef
	b	.	// SWI
	b	.	// Prefetch Abort
	b	.	// Data Abort
	b	.	// Address Exception
	b	.	// IRQ
	b	.	// FIQ/DEC
LEXT(ResetPrivateData)
	.space  (480),0		// (filled with 0s)
	// ExceptionLowVectorsBase + 0x200
Lreset_low_vector:
	// Reset entry: run the optional assist handlers, then the per-cpu
	// reset handler.  The movs/blxne pairs mean "call only if non-NULL".
	adr		r4, EXT(ResetHandlerData)
	ldr		r0, [r4, ASSIST_RESET_HANDLER]
	movs	r0, r0								// NULL check (sets flags)
	blxne	r0									// call assist reset handler if present
	adr		r4, EXT(ResetHandlerData)
	ldr		r1, [r4, CPU_DATA_ENTRIES]
	ldr		r1, [r1, CPU_DATA_PADDR]
	ldr		r5, [r1, CPU_RESET_ASSIST]
	movs	r5, r5								// NULL check
	blxne	r5									// call per-cpu reset assist if present
	adr		r4, EXT(ResetHandlerData)
	ldr		r0, [r4, BOOT_ARGS]
	ldr		r1, [r4, CPU_DATA_ENTRIES]
#if	defined(ARMA7)
	// physical cpu number is stored in MPIDR Affinity level 0
	mrc		p15, 0, r6, c0, c0, 5				// Read MPIDR
	and		r6, r6, #0xFF						// Extract Affinity level 0
#else
#error missing Who Am I implementation
#endif
	// physical cpu number matches cpu number
//#if cdeSize != 16
//#error cpu_data_entry is not 16bytes in size
//#endif
	lsl		r6, r6, #4							// Get CpuDataEntry offset (entry size is 16 bytes)
	add		r1, r1, r6							// Get  cpu_data_entry pointer
	ldr		r1, [r1, CPU_DATA_PADDR]
	ldr		r5, [r1, CPU_RESET_HANDLER]
	movs	r5, r5								// NULL check
	blxne	r5									// Branch to cpu reset handler
	b		.									// Unexpected reset
	.globl  EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space  (rhdSize_NUM),0		// (filled with 0s; populated at runtime)


        .globl EXT(ExceptionLowVectorsEnd)
LEXT(ExceptionLowVectorsEnd)
131
/*
 * Main (high) exception vector page.
 *
 * Each vector entry indirects through the per-cpu exception vector table
 * (cpu_data->cpu_exc_vectors): it loads TPIDRPRW (current thread), chases
 * ACT_CPUDATAP and CPU_EXC_VECTORS, then jumps through a fixed word offset
 * in that table.  The banked mode SP is used as scratch, so the live SP is
 * only established once the handler runs.  Layout is position-critical:
 * each Lexc_* entry is exactly 4 words, and .fill pads to a 4KB page.
 */
	.text
	.align 12						// page-aligned so it can be mapped at the vector base
	.globl EXT(ExceptionVectorsBase)

LEXT(ExceptionVectorsBase)

	adr	pc, Lexc_reset_vector
	adr	pc, Lexc_undefined_inst_vector
	adr	pc, Lexc_swi_vector
	adr	pc, Lexc_prefetch_abort_vector
	adr	pc, Lexc_data_abort_vector
	adr	pc, Lexc_address_exception_vector
	adr	pc, Lexc_irq_vector
#if __ARM_TIME__
	adr	pc, Lexc_decirq_vector
#else /* ! __ARM_TIME__ */
	mov	pc, r9					// FIQ slot: platform handler address expected in banked r9
#endif /* __ARM_TIME__ */

Lexc_reset_vector:
	b	.
	.long	0x0
	.long	0x0
	.long	0x0
Lexc_undefined_inst_vector:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW (current thread)
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu data
	ldr		sp, [sp, CPU_EXC_VECTORS]			// Get exception vector table
	ldr		pc, [sp, #4]						// Branch to exception handler (undef slot)
Lexc_swi_vector:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu data
	ldr		sp, [sp, CPU_EXC_VECTORS]			// Get exception vector table
	ldr		pc, [sp, #8]						// Branch to exception handler (swi slot)
Lexc_prefetch_abort_vector:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu data
	ldr		sp, [sp, CPU_EXC_VECTORS]			// Get exception vector table
	ldr		pc, [sp, #0xC]						// Branch to exception handler (prefetch abort slot)
Lexc_data_abort_vector:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu data
	ldr		sp, [sp, CPU_EXC_VECTORS]			// Get exception vector table
	ldr		pc, [sp, #0x10]						// Branch to exception handler (data abort slot)
Lexc_address_exception_vector:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu data
	ldr		sp, [sp, CPU_EXC_VECTORS]			// Get exception vector table
	ldr		pc, [sp, #0x14]						// Branch to exception handler (address exception slot)
Lexc_irq_vector:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu data
	ldr		sp, [sp, CPU_EXC_VECTORS]			// Get exception vector table
	ldr		pc, [sp, #0x18]						// Branch to exception handler (irq slot)
#if __ARM_TIME__
Lexc_decirq_vector:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu data
	ldr		sp, [sp, CPU_EXC_VECTORS]			// Get exception vector table
	ldr		pc, [sp, #0x1C]						// Branch to exception handler (decrementer irq slot)
#else /* ! __ARM_TIME__ */
	.long	0x0
	.long	0x0
	.long	0x0
	.long	0x0
#endif /* __ARM_TIME__ */

	.fill   984, 4, 0						// Push to the 4KB page boundary

    .globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
203
204
/*
 * Targets for the exception vectors; we patch these during boot (to allow
 * for position independent code without complicating the vectors; see start.s).
 *
 * One word per exception class, in the same order as the hardware vector
 * table above; each Lexc_* stub reads its slot from the per-cpu copy of
 * this table.  All slots start as 0 and are filled in at boot.
 */
	.globl EXT(ExceptionVectorsTable)
LEXT(ExceptionVectorsTable)
Lreset_vector:
	.long	0x0
Lundefined_inst_vector:
	.long	0x0
Lswi_vector:
	.long	0x0
Lprefetch_abort_vector:
	.long	0x0
Ldata_abort_vector:
	.long	0x0
Laddress_exception_vector:
	.long	0x0
Lirq_vector:
	.long	0x0
Ldecirq_vector:
	.long	0x0
227
228
/*
 *	First Level Exception Handlers
 *
 *	fleh_reset: reset is never expected once the kernel is running;
 *	park the cpu forever.
 */
	.text
	.align 2
	.globl EXT(fleh_reset)
LEXT(fleh_reset)
	b		.									// Never return
237
/*
 *	First Level Exception Handler for Undefined Instruction.
 *	(declaration only; body is at LEXT(fleh_undef) below)
 */
	.text
	.align 2
	.globl EXT(fleh_undef)

/*
 *	Ensures the stack is safely aligned, usually in preparation for an external branch
 *	arg0: temp register for storing the stack offset
 *	arg1: temp register for storing the previous stack pointer
 *
 *	The previous sp is saved at [sp] so UNALIGN_STACK can restore it.
 *	No-op unless the ABI requires >4-byte stack alignment (armv7k).
 */
.macro ALIGN_STACK
/*
 * For armv7k ABI, the stack needs to be 16-byte aligned
 */
#if __BIGGEST_ALIGNMENT__ > 4
	and		$0, sp, #0x0F						// sp mod 16-bytes
	cmp		$0, #4							// need space for the sp on the stack
	addlt		$0, $0, #0x10						// make room if needed, but keep stack aligned
	mov		$1, sp							// get current sp
	sub		sp, sp, $0						// align stack
	str		$1, [sp]						// store previous sp on stack
#endif
.endmacro
263
/*
 *	Restores the stack pointer to its previous value following an ALIGN_STACK call.
 *	(ALIGN_STACK stored the old sp at [sp]; no-op when alignment was not required.)
 */
.macro UNALIGN_STACK
#if __BIGGEST_ALIGNMENT__ > 4
	ldr		sp, [sp]
#endif
.endmacro
272
/*
 *	Checks that cpu is currently in the expected mode, panics if not.
 *	arg0: the expected mode, should be one of the PSR_*_MODE defines
 *
 *	Clobbers the banked sp as scratch (no stack exists yet at this point);
 *	on mismatch the bad mode is passed to ExceptionVectorPanic in r0.
 */
.macro VERIFY_EXCEPTION_MODE
	mrs		sp, cpsr 							// Read cpsr
	and		sp, sp, #PSR_MODE_MASK					// Extract current mode
	cmp		sp, $0							// Check specified mode
	movne		r0, sp
	bne		EXT(ExceptionVectorPanic)
.endmacro
284
/*
 *	Checks previous processor mode.  If usermode, will execute the code
 *	following the macro to handle the userspace exception.  Otherwise,
 *	will branch to a ELSE_IF_KERNELMODE_EXCEPTION call with the same
 *	argument.
 *	arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 *
 *	Dispatches on spsr (the interrupted mode) to $0_from_user/_irq/_fiq/_svc
 *	labels; the _irq/_fiq/_svc targets are defined by the paired
 *	ELSE_IF_KERNELMODE_EXCEPTION.  Uses banked sp as scratch.
 */
.macro IF_USERMODE_EXCEPTION
	mrs		sp, spsr
	and		sp, sp, #PSR_MODE_MASK						// Is it from user?
	cmp		sp, #PSR_USER_MODE
	beq		$0_from_user
	cmp		sp, #PSR_IRQ_MODE
	beq		$0_from_irq
	cmp		sp, #PSR_FIQ_MODE
	beq		$0_from_fiq
	bne		$0_from_svc							// anything else is treated as svc
$0_from_user:
.endmacro
304
/*
 *	Handles an exception taken from kernelmode (IRQ/FIQ/SVC/etc).
 *	Places the processor into the correct mode and executes the
 *	code following the macro to handle the kernel exception.
 *	Intended to be paired with a prior call to IF_USERMODE_EXCEPTION.
 *	arg0: arbitrary string indicating the exception class, e.g. 'dataabt'
 *
 *	cpsid i switches back to the interrupted mode with IRQs masked, so the
 *	code after the macro runs on that mode's (kernel) stack.
 */
.macro ELSE_IF_KERNELMODE_EXCEPTION
$0_from_irq:
	cpsid		i, #PSR_IRQ_MODE
	b		$0_from_kernel
$0_from_fiq:
	cpsid		i, #PSR_FIQ_MODE
	b		$0_from_kernel
$0_from_svc:
	cpsid		i, #PSR_SVC_MODE
$0_from_kernel:
.endmacro
323
/*
 * fleh_undef: first-level handler for undefined-instruction exceptions.
 *
 * Entered in UND mode (verified below).  Backs lr up to the faulting
 * instruction (4 bytes for ARM, 2 for Thumb), then splits on the
 * interrupted mode:
 *   - from user:   save state into the thread PCB, switch to SVC mode on
 *                  the thread kernel stack, call sleh_undef, and return
 *                  to user via load_and_go_user.
 *   - from kernel: save state on the interrupted mode's kernel stack,
 *                  call sleh_undef, and resume via load_and_go_sys.
 */
LEXT(fleh_undef)
VERIFY_EXCEPTION_MODE PSR_UND_MODE
	mrs		sp, spsr							// Fetch the previous mode
	tst		sp, #PSR_TF							// Is it Thumb?
	subeq		lr, lr, #4						// ARM: pc of faulting insn = lr - 4
	subne		lr, lr, #2						// Thumb: lr - 2
IF_USERMODE_EXCEPTION undef
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW (current thread)
	add		sp, sp, ACT_PCBDATA				// Get current thread PCB pointer

	stmia	sp, {r0-r12, sp, lr}^				// Save user context on PCB (user banked sp/lr via ^)
	mov		r7, #0								// Zero the frame pointer
	nop											// required after stm..^ before banked-reg access

	mov		r0, sp								// Store arm_saved_state pointer
												//  for argument

	str		lr, [sp, SS_PC]						// Save user mode pc register

	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]					// Save user mode cpsr

	cpsid i, #PSR_SVC_MODE						// switch to SVC mode, IRQs masked
	mrs		r3, cpsr 							// Read cpsr
	msr		spsr_cxsf, r3                       // Set spsr(svc mode cpsr)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [r9, TH_KSTACKPTR]				// Load kernel stack
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif

	mvn		r0, #0								// -1
	str		r0, [r9, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before handling abort from userspace

#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW (call may have clobbered r9)
#endif

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_save)					// Save the current VFP state to ACT_UVFP
	mov		r3, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r3					// And shove it into FPSCR
	add		r1, r9, ACT_UVFP				// Reload the pointer to the save state
	add		r0, r9, ACT_PCBDATA				// Reload the VFP save state argument
#else
	mov		r1, #0                              		// Clear the VFP save state argument
	add		r0, r9, ACT_PCBDATA					// Reload arm_saved_state pointer
#endif

	bl		EXT(sleh_undef)						// Call second level handler
												//   sleh will enable interrupt
	b		load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION undef
	/*
	 * We have a kernel stack already, and I will use it to save contexts
	 * IRQ is disabled
	 */
#if CONFIG_DTRACE
	// We need a frame for backtracing. The LR here is the LR of supervisor mode, not the location where the exception
	// took place. We'll store that later after we switch to undef mode and pull out the LR from there.

	// This frame is consumed by fbt_invop. Any changes with the size or location of this frame will probably require
	// changes in fbt_invop also.
	stmfd sp!, { r7, lr }
#endif

	sub		sp, sp, EXC_CTX_SIZE						// Reserve for arm_saved_state

	stmia	sp, {r0-r12}						// Save on supervisor mode stack
	str		lr, [sp, SS_LR]

#if CONFIG_DTRACE
	add		r7, sp, EXC_CTX_SIZE						// Save frame pointer
#endif

	mrs		r4, lr_und							// banked UND-mode lr = faulting pc
	str		r4, [sp, SS_PC]						// Save complete
	mrs		r4, spsr_und						// banked UND-mode spsr = interrupted cpsr
	str		r4, [sp, SS_CPSR]

	mov		ip, sp

/*
   sp - stack pointer
   ip - stack pointer
   r7 - frame pointer state
 */


#if CONFIG_DTRACE
	ldr		r0, [ip, SS_PC]						// Get the exception pc to store later
#endif

	add		ip, ip, EXC_CTX_SIZE						// Send stack pointer to debugger
#if CONFIG_DTRACE
	str		r0, [ip, #4]						// store exception pc in the dtrace frame
	add		ip, ip, #8
#endif
	str		ip, [sp, SS_SP]						// for accessing local variable
#if CONFIG_DTRACE
	sub		ip, ip, #8
#endif
	sub		ip, ip, EXC_CTX_SIZE

#if __ARM_VFP__
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	add		r0, sp, SS_SIZE					// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN					// Get the actual vfp save area
	mov		r5, r0						// Stash the save area in another register
	bl		EXT(vfp_save)					// Save the current VFP state to the stack
	mov		r1, r5						// Load the VFP save area argument
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#else
	mov     r1, #0                              // Clear the facility context argument
#endif
#if __ARM_USER_PROTECT__
	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0 (saved for restore after sleh)
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	cmp		r3, r10
	beq		1f
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
1:
	mrc		p15, 0, r11, c13, c0, 1				// Save CONTEXTIDR
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	mov		r0, sp								// Argument

	ALIGN_STACK r2, r3
	bl		EXT(sleh_undef)						// Call second level handler
	UNALIGN_STACK

#if __ARM_USER_PROTECT__
	// Restore the pre-exception translation state saved in r10/r11 above.
	mrc		p15, 0, r9, c13, c0, 4              // Read TPIDRPRW
	ldr		r0, [r9, ACT_KPTW_TTB]              // Load kernel ttb
	cmp		r10, r0
	beq		1f
	ldr		r10, [r9, ACT_UPTW_TTB]             // Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0              // Set TTBR0
	ldr		r11, [r9, ACT_ASID]                 // Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1             // set CONTEXTIDR
	isb
#endif
	b		load_and_go_sys
481
482
/*
 * First Level Exception Handler for Software Interrupt
 *
 *	We assert that only user level can use the "SWI" instruction for a system
 *	call on development kernels, and assume it's true on release.
 *
 *	System call number is stored in r12.
 *	System call arguments are stored in r0 to r6 and r8 (we skip r7)
 *
 *	Register conventions after entry from user:
 *	  r8  = pointer to the user arm_saved_state (PCB)
 *	  r9  = current thread (TPIDRPRW)
 *	  r10 = current task
 *	  r11 = syscall number (saved from r12)
 */
	.text
	.align 5
	.globl EXT(fleh_swi)

LEXT(fleh_swi)
	// Determine whether the SWI came from user mode without touching any
	// general register: park ip in the ABT-mode banked sp, test the low
	// nibble of spsr (0 => user mode), then restore ip.
	cpsid	i, #PSR_ABT_MODE
	mov		sp, ip								// Save ip
	cpsid	i, #PSR_SVC_MODE
	mrs		ip, spsr							// Check the previous mode
	tst		ip, #0x0f							// low nibble zero => user mode
	cpsid	i, #PSR_ABT_MODE
	mov		ip, sp								// Restore ip
	cpsid	i, #PSR_SVC_MODE
	beq		swi_from_user

/* Only user mode can use SWI. Panic if the kernel tries. */
swi_from_kernel:
	sub     sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE

	str		r0, [sp, SS_SP]						// Save supervisor mode sp
	str		lr, [sp, SS_LR]                     // Save supervisor mode lr

	ALIGN_STACK r0, r1
	adr		r0, L_kernel_swi_panic_str			// Load panic messages and panic()
	blx		EXT(panic)
	b		.

swi_from_user:
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	add		sp, sp, ACT_PCBDATA					// Get User PCB


	/* Check for special mach_absolute_time trap value.
	 * This is intended to be a super-lightweight call to ml_get_timebase(), which
	 * is handrolled assembly and does not use the stack, thus not requiring us to setup a kernel stack. */
	cmp		r12, #MACH_ARM_TRAP_ABSTIME
	beq		fleh_swi_trap_tb
	stmia	sp, {r0-r12, sp, lr}^				// Save user context on PCB
	mov		r7, #0								// Zero the frame pointer
	nop											// required after stm..^
	mov		r8, sp								// Store arm_saved_state pointer
	add		sp, sp, SS_PC
	srsia sp, 	#PSR_SVC_MODE					// store return pc/cpsr at PCB SS_PC/SS_CPSR
	mrs		r3, cpsr 							// Read cpsr
	msr		spsr_cxsf, r3                       // Set spsr(svc mode cpsr)
	sub		r9, sp, ACT_PCBDATA_PC				// recover thread pointer from PCB address

	ldr		sp, [r9, TH_KSTACKPTR]				// Load kernel stack
	mov		r11, r12							// save the syscall vector in a nontrashed register

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_save)					// Save the current VFP state to ACT_UVFP
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif

	mvn		r0, #0								// -1
	str		r0, [r9, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before handling SWI from userspace

#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	add		r8, r9, ACT_PCBDATA				// Reload arm_saved_state pointer
#endif
	ldr		r10, [r9, ACT_TASK]				// Load the current task

	/* enable interrupts */
	cpsie	i							// Enable IRQ

	cmp		r11, #MACH_ARM_TRAP_CONTTIME			// Special value for mach_continuous_time
	beq		fleh_swi_trap_mct

	cmp		r11, #0x80000000					// cache/cthread fast-trap marker
	beq		fleh_swi_trap
fleh_swi_trap_ret:

#if TRACE_SYSCALL
	/* trace the syscall */
	mov		r0, r8
	bl		EXT(syscall_trace)
#endif

	bl		EXT(mach_kauth_cred_thread_update)
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	/* unix syscall? */
	rsbs	r5, r11, #0							// make the syscall positive (if negative)
	ble		fleh_swi_unix						// positive syscalls are unix (note reverse logic here)

fleh_swi_mach:
	/* note that mach_syscall_trace can modify r9, so increment the thread
	 * syscall count before the call : */
	ldr		r2, [r9, TH_MACH_SYSCALLS]
	add		r2, r2, #1
	str		r2, [r9, TH_MACH_SYSCALLS]

	LOAD_ADDR(r1, mach_trap_table)				// load mach_trap_table
#if MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 12
	add		r11, r5, r5, lsl #1					// syscall * 3
	add		r6, r1, r11, lsl #2					// trap_table + syscall * 12
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 16
	add		r6, r1, r5, lsl #4					// trap_table + syscall * 16
#elif MACH_TRAP_TABLE_ENTRY_SIZE_NUM == 20
	add		r11, r5, r5, lsl #2					// syscall * 5
	add		r6, r1, r11, lsl #2					// trap_table + syscall * 20
#else
#error mach_trap_t size unhandled (see MACH_TRAP_TABLE_ENTRY_SIZE)!
#endif

#ifndef	NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr		r4, [r4]
	movs	r4, r4								// kdebug enabled?
	movne	r0, r8								// ready the reg state pointer as an arg to the call
	movne	r1, r5								// syscall number as 2nd arg
	COND_EXTERN_BLNE(mach_syscall_trace)
#endif
	adr		lr,	fleh_swi_exit					// any calls from here on out will return to our exit path
	cmp		r5, MACH_TRAP_TABLE_COUNT			// check syscall number range
	bge		fleh_swi_mach_error

/*
 * For arm32 ABI where 64-bit types are aligned to even registers and
 * 64-bits on stack, we need to unpack registers differently. So
 * we use the mungers for marshalling in arguments from user space.
 * Currently this is just ARMv7k.
 */
#if __BIGGEST_ALIGNMENT__ > 4
	sub		sp, #0x40						// allocate buffer and keep stack 128-bit aligned
	                                            				//     it should be big enough for all syscall arguments
	ldr		r11, [r6, #8]						// get mach_trap_table[call_number].mach_trap_arg_munge32
	teq		r11, #0							// check if we have a munger
	moveq		r0, #0
	movne		r0, r8							// ready the reg state pointer as an arg to the call
	movne		r1, sp							// stack will hold arguments buffer
	blxne		r11							// call munger to get arguments from userspace
	adr		lr,	fleh_swi_exit					// any calls from here on out will return to our exit path
	teq		r0, #0
	bne		fleh_swi_mach_error					// exit if the munger returned non-zero status
#endif

	ldr		r1, [r6, #4]						// load the syscall vector

	LOAD_ADDR(r2, kern_invalid)					// test to make sure the trap is not kern_invalid
	teq		r1, r2
	beq		fleh_swi_mach_error

#if __BIGGEST_ALIGNMENT__ > 4
	mov		r0, sp								// argument buffer on stack
	bx		r1									// call the syscall handler (returns to fleh_swi_exit via lr)
#else
	mov		r0, r8								// ready the reg state pointer as an arg to the call
	bx		r1									// call the syscall handler (returns to fleh_swi_exit via lr)
#endif

fleh_swi_exit64:
	str		r1, [r8, #4]						// top of 64-bit return
fleh_swi_exit:
	str		r0, [r8]							// save the return value
#ifndef	NO_KDEBUG
	movs	r4, r4								// r4 still holds kdebug_enable
	movne	r1, r5
	COND_EXTERN_BLNE(mach_syscall_trace_exit)
#endif
#if TRACE_SYSCALL
	bl		EXT(syscall_trace_exit)
#endif

	mov		r0, #1
	bl		EXT(throttle_lowpri_io)				// throttle_lowpri_io(1);

	bl		EXT(thread_exception_return)
	b		.

fleh_swi_mach_error:
	mov		r0, #EXC_SYSCALL
	sub		r1, sp, #4							// exception code array (the syscall number sits below sp)
	mov		r2, #1								// one code
	bl		EXT(exception_triage)
	b		.

	.align	5
fleh_swi_unix:
	ldr		r1, [r9, TH_UNIX_SYSCALLS]
	mov		r0, r8								// reg state structure is arg
	add		r1, r1, #1							// bump thread unix syscall count
	str		r1, [r9, TH_UNIX_SYSCALLS]
	mov		r1, r9								// current thread in arg1
	ldr		r2, [r10, TASK_BSD_INFO]			// current proc in arg2
	bl		EXT(unix_syscall)
	b		.									// unix_syscall does not return
693
/*
 * fleh_swi_trap: dispatcher for the 0x80000000 fast-trap SWIs.
 * r0-r3 are reloaded from the saved user state; r3 selects the trap:
 *   0 = icache invalidate, 1 = dcache flush,
 *   2 = set cthread self,  3 = get cthread self.
 * Out-of-range selectors fall through to the normal syscall path.
 * The addls-to-pc jump table below is position-critical: each target
 * branch must stay exactly one word, in this order.
 */
fleh_swi_trap:
	ldmia		r8, {r0-r3}						// reload user r0-r3 from saved state
	cmp		r3, #3
	addls	pc, pc, r3, LSL#2					// computed jump into the table below
	b		fleh_swi_trap_ret
	b		icache_invalidate_trap
	b		dcache_flush_trap
	b		thread_set_cthread_trap
	b		thread_get_cthread_trap

/*
 * icache_invalidate_trap: invalidate the icache for user range [r0, r0+r1).
 * Installs cache_trap_jmp as the thread recovery routine so a fault during
 * the cache op is turned into an EXC_BAD_ACCESS instead of a panic.
 */
icache_invalidate_trap:
	add		r3, r0, r1							// end of range
	cmp		r3, VM_MAX_ADDRESS					// must be a user address
	subhi	r3, r3, #1<<MMU_CLINE
	bhi		cache_trap_error
	adr		r11, cache_trap_jmp
	ldr		r6,  [r9, TH_RECOVER]				// Save existing recovery routine
	str		r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
	ldr     r5, [r9, ACT_UPTW_TTB]				// Load thread ttb
	mcr		p15, 0, r5, c2, c0, 0				// Set TTBR0
	ldr     r5, [r9, ACT_ASID]					// Load thread asid
	mcr		p15, 0, r5, c13, c0, 1				// Set CONTEXTIDR
	dsb		ish
	isb
#endif
	bl		EXT(InvalidatePoU_IcacheRegion)
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr		r4, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r4, c2, c0, 0				// Set TTBR0
	mov		r4, #0								// Load kernel asid
	mcr		p15, 0, r4, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	str		r6, [r9, TH_RECOVER]				// restore previous recovery routine
	bl		EXT(thread_exception_return)
	b		.

/*
 * dcache_flush_trap: flush the dcache for user range [r0, r0+r1).
 * Same recovery-routine protocol as icache_invalidate_trap (saved in r4 here).
 */
dcache_flush_trap:
	add		r3, r0, r1							// end of range
	cmp		r3, VM_MAX_ADDRESS					// must be a user address
	subhi	r3, r3, #1<<MMU_CLINE
	bhi		cache_trap_error
	adr		r11, cache_trap_jmp
	ldr		r4,  [r9, TH_RECOVER]				// Save existing recovery routine
	str		r11, [r9, TH_RECOVER]
#if __ARM_USER_PROTECT__
	ldr     r6, [r9, ACT_UPTW_TTB]              // Load thread ttb
	mcr		p15, 0, r6, c2, c0, 0				// Set TTBR0
	ldr     r5, [r9, ACT_ASID]					// Load thread asid
	mcr		p15, 0, r5, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	bl		EXT(flush_dcache_syscall)
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
#if __ARM_USER_PROTECT__
	ldr		r5, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r5, c2, c0, 0				// Set TTBR0
	mov		r5, #0								// Load kernel asid
	mcr		p15, 0, r5, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	str		r4, [r9, TH_RECOVER]				// restore previous recovery routine
	bl		EXT(thread_exception_return)
	b		.

// Fast trap: set the cthread (TLS) pointer; r0-r3 already hold user args.
thread_set_cthread_trap:
	bl		EXT(thread_set_cthread_self)
	bl		EXT(thread_exception_return)
	b		.

// Fast trap: return the cthread (TLS) pointer in user r0.
thread_get_cthread_trap:
	bl		EXT(thread_get_cthread_self)
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	add		r1, r9, ACT_PCBDATA					// Get User PCB
	str		r0, [r1, SS_R0]						// set return value
	bl		EXT(thread_exception_return)
	b		.

/*
 * cache_trap_jmp: recovery target if a cache trap faults on the user range.
 * Restores kernel translation state, fetches the fault address, and joins
 * cache_trap_error to raise EXC_BAD_ACCESS/KERN_INVALID_ADDRESS.
 */
cache_trap_jmp:
#if __ARM_USER_PROTECT__
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	ldr		r5, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r5, c2, c0, 0				// Set TTBR0
	mov		r5, #0								// Load kernel asid
	mcr		p15, 0, r5, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	mrc		p15, 0, r3, c6, c0 					// Read Fault Address
cache_trap_error:
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	add		r0, r9, ACT_PCBDATA					// Get User PCB
	ldr		r1, [r0, SS_PC]						// Save user mode pc register as pc
	sub		r1, r1, #4							// Backtrack current pc
	str		r1, [r0, SS_PC]						// pc at cache assist swi
	str		r3, [r0, SS_VADDR]					// Fault Address
	mov		r0, #EXC_BAD_ACCESS
	mov		r2, KERN_INVALID_ADDRESS
	sub		sp, sp, #8							// room for the 2-word exception code array
	mov		r1, sp
	str		r2, [sp]
	str		r3, [sp, #4]
	ALIGN_STACK r2, r3
	mov		r2, #2								// two codes
	bl		EXT(exception_triage)
	b		.

// Fast trap: mach_continuous_time(); 64-bit result stored to user r0/r1.
fleh_swi_trap_mct:
	bl 		EXT(mach_continuous_time)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	add		r9, r9, ACT_PCBDATA_R0				// Get User register state
	stmia		r9, {r0, r1}					// set 64-bit return value
	bl		EXT(thread_exception_return)
	b		.

// Fastest trap: mach_absolute_time.  No kernel stack is set up; sp still
// points at the user PCB, so lr is parked in SS_PC around the call.
fleh_swi_trap_tb:
	str		lr, [sp, SS_PC]
	bl		EXT(ml_get_timebase)				// ml_get_timebase() (64-bit return)
	ldr		lr, [sp, SS_PC]
	nop
	movs	pc, lr								// Return to user (restores cpsr from spsr)

	.align  2
L_kernel_swi_panic_str:
	.asciz  "fleh_swi: took SWI from kernel mode\n"
	.align	2
821
/*
 * First Level Exception Handler for Prefetching Abort.
 *
 * Entered in ABT mode (verified below); lr - 4 is the faulting pc.
 * From user: save state + IFAR/IFSR into the thread PCB, switch to the
 * thread kernel stack in SVC mode, and call sleh_abort(state,
 * T_PREFETCH_ABT), returning via load_and_go_user.  From kernel: save
 * state on the interrupted mode's stack and resume via load_and_go_sys.
 */
	.text
	.align 2
	.globl EXT(fleh_prefabt)

LEXT(fleh_prefabt)
VERIFY_EXCEPTION_MODE PSR_ABT_MODE
	sub		lr, lr, #4							// faulting pc = lr - 4 for prefetch aborts

IF_USERMODE_EXCEPTION prefabt
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	add		sp, sp, ACT_PCBDATA					// Get User PCB

	stmia   sp, {r0-r12, sp, lr}^				// Save user context on PCB (user banked sp/lr via ^)
	mov		r7, #0								// Zero the frame pointer
	nop											// required after stm..^
	mov     r0, sp								// Store arm_saved_state pointer
												// For argument
	str		lr, [sp, SS_PC]						// Save user mode pc register as pc
	mrc		p15, 0, r1, c6, c0, 2 				// Read IFAR
	str		r1, [sp, SS_VADDR]					// and fault address of pcb

	mrc		p15, 0, r5, c5, c0, 1 				// Read Fault Status
	str		r5, [sp, SS_STATUS]					// Save fault status register to pcb

	mrs     r4, spsr
	str     r4, [sp, SS_CPSR]					// Save user mode cpsr

	cpsid	i, #PSR_SVC_MODE					// switch to SVC mode, IRQs masked
	mrs		r3, cpsr 							// Read cpsr
	msr		spsr_cxsf, r3                       // Set spsr(svc mode cpsr)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [r9, TH_KSTACKPTR]				// Load kernel stack

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_save)					// Save the current VFP state to ACT_UVFP
	mov		r3, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r3					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif

	mvn		r0, #0								// -1
	str		r0, [r9, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before handling abort from userspace

#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW (call may have clobbered r9)
#endif

	add		r0, r9, ACT_PCBDATA					// Reload arm_saved_state pointer
	mov		r1, T_PREFETCH_ABT					// Pass abort type
	bl		EXT(sleh_abort)						// Call second level handler
												// Sleh will enable interrupt
	b		load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION prefabt

UNWIND_PROLOGUE

	/*
	 * We have a kernel stack already, and I will use it to save contexts:
	 *     ------------------
	 *    | VFP saved state  |
	 *    |------------------|
	 *    | ARM saved state  |
	 * SP  ------------------
	 *
	 * IRQ is disabled
	 */
	sub     sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE

	str		r0, [sp, SS_SP]						// Save supervisor mode sp
	str		lr, [sp, SS_LR]                     // Save supervisor mode lr

	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW

#if __ARM_VFP__
	add		r0, sp, SS_SIZE					// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN					// Get the actual vfp save area
	bl		EXT(vfp_save)					// Save the current VFP state to the stack
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0 (saved for restore after sleh)
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	cmp		r3, r10
	beq		1f
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
1:
	mrc		p15, 0, r11, c13, c0, 1				// Save CONTEXTIDR
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif

	mrs		r4, lr_abt							// banked ABT-mode lr = faulting pc
	str		r4, [sp, SS_PC]					// Save pc

	mrc		p15, 0, r5, c6, c0, 2 				// Read IFAR
	str		r5, [sp, SS_VADDR]					// and fault address of pcb
	mrc		p15, 0, r5, c5, c0, 1 				// Read (instruction) Fault Status
	str		r5, [sp, SS_STATUS]					// Save fault status register to pcb

	mrs		r4, spsr_abt						// banked ABT-mode spsr = interrupted cpsr
	str		r4, [sp, SS_CPSR]

	mov		r0, sp
	ALIGN_STACK r1, r2
	mov		r1, T_PREFETCH_ABT					// Pass abort type


UNWIND_DIRECTIVES

	bl		EXT(sleh_abort) 					// Call second level handler
	UNALIGN_STACK

	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
#if __ARM_USER_PROTECT__
	// Restore the pre-exception translation state saved in r10/r11 above.
	ldr		r0, [r9, ACT_KPTW_TTB]              // Load kernel ttb
	cmp		r10, r0
	beq		1f
	ldr		r10, [r9, ACT_UPTW_TTB]             // Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0              // Set TTBR0
	ldr		r11, [r9, ACT_ASID]                 // Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1             // set CONTEXTIDR
	isb
#endif

	b		load_and_go_sys

UNWIND_EPILOGUE
969
970/*
971 * First Level Exception Handler for Data Abort
972 */
/*
 * fleh_dataabt: First Level Exception Handler for data aborts.
 *
 * Entered in ABT mode.  lr_abt points 8 bytes past the faulting
 * instruction for data aborts, so it is rewound before being saved
 * as the PC.  The saved spsr selects one of two paths:
 *   - from user:   save full user state into the thread PCB, switch to
 *                  SVC mode on the kernel stack, call sleh_abort(), and
 *                  return via load_and_go_user.
 *   - from kernel: save state on the current kernel stack, call
 *                  sleh_abort(), and return via load_and_go_sys.
 */
	.text
	.align 2
	.globl EXT(fleh_dataabt)

LEXT(fleh_dataabt)
VERIFY_EXCEPTION_MODE PSR_ABT_MODE
	sub		lr, lr, #8							// Rewind lr_abt to the faulting instruction (data abort offset is 8)
IF_USERMODE_EXCEPTION dataabt
	/* Abort from user mode: sp_abt is free, use it to reach the PCB. */
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	add		sp, sp, ACT_PCBDATA					// Get User PCB

	stmia	sp, {r0-r12, sp, lr}^				// Save user context on PCB
	mov		r7, #0								// Zero the frame pointer
	nop											// Spacer after the user-banked stm (see the "Hardware problem" nop in this file)

	mov		r0, sp								// Store arm_saved_state pointer
												// For argument

	str		lr, [sp, SS_PC]						// Save user mode pc register

	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]					// Save user mode cpsr

	mrc		p15, 0, r5, c5, c0 					// Read Fault Status
	mrc		p15, 0, r6, c6, c0 					// Read Fault Address
	str		r5, [sp, SS_STATUS]					// Save fault status register to pcb
	str		r6, [sp, SS_VADDR]					// Save fault address to pcb

	/* Switch to SVC mode (IRQ still masked) on the thread kernel stack. */
	cpsid	i, #PSR_SVC_MODE
	mrs		r3, cpsr 							// Read cpsr
	msr		spsr_cxsf, r3                       // Set spsr(svc mode cpsr)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [r9, TH_KSTACKPTR]				// Load kernel stack

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_save)					// Save the current VFP state to ACT_UVFP
	mov		r3, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r3					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	/* Switch to the kernel translation table / ASID for the handler. */
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif

	mvn		r0, #0								// r0 = -1 (THROTTLE_LEVEL_NONE)
	str		r0, [r9, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before handling abort from userspace

#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
#endif

	add		r0, r9, ACT_PCBDATA					// Reload arm_saved_state pointer
	mov     r1, T_DATA_ABT						// Pass abort type
	bl		EXT(sleh_abort)						// Call second level handler
												// Sleh will enable irq
	b		load_and_go_user

ELSE_IF_KERNELMODE_EXCEPTION dataabt

UNWIND_PROLOGUE

	/*
	 * We have a kernel stack already, and I will use it to save contexts:
	 *     ------------------
	 *    | VFP saved state  |
	 *    |------------------|
	 *    | ARM saved state  |
	 * SP  ------------------
	 *
	 * IRQ is disabled
	 */
	sub     sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE

	str		r0, [sp, SS_SP]						// Save supervisor mode sp
	str		lr, [sp, SS_LR]                     // Save supervisor mode lr

	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW

#if __ARM_VFP__
	add		r0, sp, SS_SIZE					// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN					// Get the actual vfp save area
	bl		EXT(vfp_save)					// Save the current VFP state to the stack
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif

	/* Abort-mode banked pc/cpsr of the interrupted kernel context. */
	mrs		r4, lr_abt
	str		r4, [sp, SS_PC]
	mrs		r4, spsr_abt
	str		r4, [sp, SS_CPSR]

#if __ARM_USER_PROTECT__
	/* Switch to the kernel ttb/ASID if not already there; r10/r11 keep the old values. */
	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	cmp		r3, r10
	beq		1f
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
1:
	mrc		p15, 0, r11, c13, c0, 1				// Save CONTEXTIDR
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	mrc		p15, 0, r5, c5, c0					// Read Fault Status
	mrc		p15, 0, r6, c6, c0					// Read Fault Address
	str		r5, [sp, SS_STATUS]					// Save fault status register to pcb
	str		r6, [sp, SS_VADDR]					// Save fault address to pcb

	mov		r0, sp								// Argument
	ALIGN_STACK r1, r2
	mov		r1, T_DATA_ABT						// Pass abort type

UNWIND_DIRECTIVES

	bl		EXT(sleh_abort)						// Call second level handler
	UNALIGN_STACK

	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
#if __ARM_USER_PROTECT__
	/* Restore the pre-abort ttb/ASID (r10/r11) unless it was already the kernel's. */
	ldr		r0, [r9, ACT_KPTW_TTB]              // Load kernel ttb
	cmp		r10, r0
	beq		1f
	ldr		r10, [r9, ACT_UPTW_TTB]             // Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0              // Set TTBR0
	ldr		r11, [r9, ACT_ASID]                 // Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1             // set CONTEXTIDR
	isb
#endif

/*
 * load_and_go_sys: common return path for exceptions/interrupts taken
 * from non-user (kernel) context.  sp points at the saved ARM state,
 * with the VFP save area laid out above it.
 *
 * If the interrupted context had IRQ enabled, the preemption count is
 * zero, and an urgent AST is pending, ast_taken_kernel() is called
 * before the context is restored.
 */
load_and_go_sys:
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW

	ldr		r4, [sp, SS_CPSR]					// Load saved cpsr
	tst		r4, #PSR_IRQF						// Test IRQ set
	bne		lags1								// Branch if IRQ disabled

	cpsid	i									// Disable IRQ
	ldr		r2, [r9, ACT_PREEMPT_CNT]           // Load preemption count
	movs	r2, r2								// Test if null
	ldr		r8, [r9, ACT_CPUDATAP]				// Get current cpu
	bne		lags1								// Branch if count not null
	ldr		r5, [r8, CPU_PENDING_AST]			// Get ASTs
	ands	r5, r5, AST_URGENT					// Get the requests we do honor
	beq		lags1								// Branch if no ASTs
#if __ARM_USER_PROTECT__
	/* Ensure the kernel ttb/ASID is live while handling the AST. */
	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	cmp		r3, r10
	beq		1f
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
1:
	mrc		p15, 0, r11, c13, c0, 1				// Save CONTEXTIDR
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	ldr		lr, [sp, SS_LR]							// Restore the link register
	stmfd		sp!, {r7, lr}							// Push a fake frame

	ALIGN_STACK r2, r3
	bl		EXT(ast_taken_kernel)				// Handle AST_URGENT
	UNALIGN_STACK

	ldmfd		sp!, {r7, lr}							// Pop the fake frame
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	ldr		r8, [r9, ACT_CPUDATAP]				// Get current cpu
#if __ARM_USER_PROTECT__
	/* Restore the pre-exception ttb/ASID from r10/r11. */
	ldr		r0, [r9, ACT_KPTW_TTB]              // Load kernel ttb
	cmp		r10, r0
	beq		1f
	ldr		r10, [r9, ACT_UPTW_TTB]             // Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0              // Set TTBR0
	ldr		r11, [r9, ACT_ASID]                 // Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1             // set CONTEXTIDR
	isb
#endif
lags1:
	ldr		lr, [sp, SS_LR]						// Reload the saved link register

	mov		ip, sp                              // Save pointer to contexts for abort mode
	ldr		sp, [ip, SS_SP]                     // Restore stack pointer

	cpsid	if, #PSR_ABT_MODE					// Final restore runs in ABT mode with IRQ/FIQ masked

	mov		sp, ip								// Point abort-mode sp at the saved context

	ldr		r4, [sp, SS_CPSR]					// Reload the saved cpsr
	msr		spsr_cxsf, r4						// Restore spsr

	clrex										// clear exclusive memory tag
	sev											// Signal event to any WFE waiters

#if __ARM_VFP__
	add		r0, sp, SS_SIZE					// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN					// Get the actual vfp save area
	bl		EXT(vfp_load)					// Load the desired VFP state from the stack
#endif

	ldr		lr, [sp, SS_PC]						// Restore lr

	ldmia	sp, {r0-r12}						// Restore other registers

	movs	pc, lr								// Return to sys (svc, irq, fiq)

UNWIND_EPILOGUE
1193
1194/*
1195 * First Level Exception Handler for address exception
1196 * Not supported
1197 */
	.text
	.align 2
	.globl EXT(fleh_addrexc)

LEXT(fleh_addrexc)
	b	.										// Address exceptions are unsupported: spin here, preserving state for a debugger
1204
1205
1206/*
1207 * First Level Exception Handler for IRQ
1208 * Current mode : IRQ
 * IRQ and FIQ are always disabled while running in the IRQ handler
1210 * We do not permit nested interrupt.
1211 *
1212 * Saving area: from user   : PCB.
1213 *		from kernel : interrupt stack.
1214 */
1215
	.text
	.align 2
	.globl EXT(fleh_irq)

/*
 * fleh_irq: First Level Exception Handler for IRQ.
 * Entered in IRQ mode with IRQ masked; lr_irq points 4 bytes past the
 * interrupted instruction.  Falls through to the user-from path here;
 * kernel-mode interrupts branch to fleh_irq_kernel.
 */
LEXT(fleh_irq)
	sub		lr, lr, #4							// Rewind lr_irq to the interrupted instruction (IRQ offset is 4)

	cpsie	a									// Re-enable async aborts

	mrs		sp, spsr							// Use banked sp_irq as scratch to inspect the interrupted mode
	tst		sp, #0x0f							// From user? or kernel?
	bne		fleh_irq_kernel

fleh_irq_user:
	/* Interrupt from user mode: save the full user state into the PCB. */
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	add		sp, sp, ACT_PCBDATA					// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^
	mov		r7, #0								// Zero the frame pointer
	nop											// Spacer after the user-banked stm (see the "Hardware problem" nop in this file)
	str		lr, [sp, SS_PC]
	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]
	mov		r5, sp								// Saved context in r5
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	ldr		r6, [r9, ACT_CPUDATAP]				// Get current cpu
	ldr		sp,	[r6, CPU_ISTACKPTR]				// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr		sp, [r9, TH_KSTACKPTR]				// Set kernel stack
	cpsid	i, #PSR_IRQ_MODE					// Back to IRQ mode; handler runs on the interrupt stack

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_save)					// Save the current VFP state to ACT_UVFP
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #1								// r0 = 1: interrupted_userspace
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	mov		r1, #0					// (not a PMI record)
	bl		EXT(telemetry_mark_curthread)		// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4				// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b		fleh_irq_handler
1276
/*
 * fleh_irq_kernel: IRQ taken while in kernel (non-user) mode.
 * Saves the interrupted SVC-mode context on the kernel stack, then
 * switches to the interrupt stack and joins fleh_irq_handler.
 */
fleh_irq_kernel:
	cpsid	i, #PSR_SVC_MODE					// Save state in the interrupted (SVC) mode

	sub     sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE				// r0 = pre-exception sp

	str		r0, [sp, SS_SP]						// Save supervisor mode sp
	str		lr, [sp, SS_LR]                     // Save supervisor mode lr

	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW

#if __ARM_VFP__
	add		r0, sp, SS_SIZE					// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN					// Get the actual vfp save area
	bl		EXT(vfp_save)					// Save the current VFP state to the stack
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	/* Switch to kernel ttb/ASID; old values are preserved in r10/r11. */
	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mrc		p15, 0, r11, c13, c0, 1				// Get CONTEXTIDR
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	mov		r5, sp								// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE					// Back to IRQ mode for the banked lr/spsr

	str		lr, [r5, SS_PC]                     // Save LR as the return PC
	mrs		r4, spsr
	str		r4, [r5, SS_CPSR]                   // Save the cpsr of the interrupted mode

	ldr		sp, [r9, ACT_CPUDATAP]				// Get current cpu
	ldr		sp,	[sp, CPU_ISTACKPTR]				// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #0								// r0 = 0: interrupted kernel
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	mov		r1, #0					// (not a PMI record)
	bl		EXT(telemetry_mark_curthread)		// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4				// ...and restore the thread pointer from TPIDRPRW
1:
#endif
1328
/*
 * fleh_irq_handler: common IRQ dispatch (user and kernel entry paths).
 * On entry: r5 = saved context, r9 = current thread (TPIDRPRW),
 * sp = interrupt stack.  Bumps the preemption count, records stats,
 * and invokes the registered platform interrupt handler.
 */
fleh_irq_handler:
	ldr		r2, [r9, ACT_PREEMPT_CNT]           // Load preemption count
	add		r2, r2, #1							// Increment count
	str		r2, [r9, ACT_PREEMPT_CNT]			// Update preemption count
#ifndef	NO_KDEBUG
	LOAD_ADDR(r8, kdebug_enable)
	ldr		r8, [r8]
	movs	r8, r8								// r8 != 0 => kdebug tracing enabled (also tested after the handler)
	movne	r0, r5								// Pass saved context if tracing
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl	    EXT(interrupt_stats)                // Record interrupt statistics
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	ldr		r4, [r9, ACT_CPUDATAP]				// Get current cpu
	str		r5, [r4, CPU_INT_STATE] 			// Saved context in cpu_int_state
	ldr		r3, [r4, CPU_STAT_IRQ]				// Get IRQ count
	add		r3, r3, #1					// Increment count
	str		r3, [r4, CPU_STAT_IRQ]				// Update  IRQ count
	ldr		r3, [r4, CPU_STAT_IRQ_WAKE]			// Get post-wake IRQ count
	add		r3, r3, #1					// Increment count
	str		r3, [r4, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
	/* Marshal the registered handler's four arguments from cpu data. */
	ldr		r0, [r4, INTERRUPT_TARGET]
	ldr		r1, [r4, INTERRUPT_REFCON]
	ldr		r2, [r4, INTERRUPT_NUB]
	ldr		r3, [r4, INTERRUPT_SOURCE]
	ldr		r5, [r4, INTERRUPT_HANDLER]			//  Call second level exception handler
	blx		r5
#ifndef	NO_KDEBUG
	movs	r8, r8
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif
	bl		EXT(entropy_collect)
/*
 * return_from_irq: common interrupt epilogue (IRQ and DEC paths).
 * Clears cpu_int_state, drops the preemption count, then returns to
 * load_and_go_user (user interrupt) or load_and_go_sys (kernel).
 */
return_from_irq:
	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	mov		r5, #0
	ldr		r4, [r9, ACT_CPUDATAP]				// Get current cpu
	str		r5, [r4, CPU_INT_STATE]				// Clear cpu_int_state
	ldr		r2, [r9, ACT_PREEMPT_CNT]           // Load preemption count
#if MACH_ASSERT
	cmp		r2, #0								// verify positive count
	bgt		1f
	push	{r7, lr}
	mov		r7, sp
	adr		r0, L_preemption_count_zero_str
	blx		EXT(panic)
	b		.
1:
#endif
	sub		r2, r2, #1							// Decrement count
	str		r2, [r9, ACT_PREEMPT_CNT]			// Update preemption count

	mrs		r0, spsr							// For check the previous mode

	cpsid	i, #PSR_SVC_MODE

	tst		r0, #0x0f							// Check if the previous is from user
	ldreq   sp, [r9, TH_KSTACKPTR]              // ...If so, reload the kernel stack pointer
	beq     load_and_go_user                    // ...and return

#if __ARM_USER_PROTECT__
	/* Kernel return: restore the pre-interrupt ttb/ASID from r10/r11. */
	ldr		r0, [r9, ACT_KPTW_TTB]              // Load kernel ttb
	cmp		r10, r0
	beq		1f
	ldr		r10, [r9, ACT_UPTW_TTB]             // Load thread ttb
	cmp		r10, r0
	beq		1f
	mcr		p15, 0, r10, c2, c0, 0              // Set TTBR0
	ldr		r11, [r9, ACT_ASID]                 // Load thread asid
1:
	mcr		p15, 0, r11, c13, c0, 1             // set CONTEXTIDR
	isb
#endif
	b       load_and_go_sys

	.align 2
L_preemption_count_zero_str:
	.ascii	"locore.s: preemption count is zero \000"
	.align 2
1406	.align 2
1407/*
1408 * First Level Exception Handler for DEC
1409 * Current mode : IRQ
 * IRQ is always disabled while running in the DEC handler (FIQ is re-enabled on entry)
1411 * We do not permit nested interrupt.
1412 *
1413 * Saving area: from user   : PCB.
1414 *		from kernel : interrupt stack.
1415 */
1416
	.text
	.align 2
	.globl EXT(fleh_decirq)

/*
 * fleh_decirq: First Level Exception Handler for the decrementer
 * (delivered as an IRQ).  Entered in IRQ mode; re-enables async aborts
 * and FIQ, then splits into user and kernel save paths like fleh_irq.
 */
LEXT(fleh_decirq)
	sub		lr, lr, #4							// Rewind lr_irq to the interrupted instruction (IRQ offset is 4)

	cpsie		af								// Re-enable async aborts/FIQ

	mrs		sp, spsr							// Use banked sp_irq as scratch to inspect the interrupted mode
	tst		sp, #0x0f							// From user? or kernel?
	bne		fleh_decirq_kernel

fleh_decirq_user:
	/* Decrementer fired in user mode: save user state into the PCB. */
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	add		sp, sp, ACT_PCBDATA					// Get User PCB
	stmia	sp, {r0-r12, sp, lr}^
	mov		r7, #0								// Zero the frame pointer
	nop											// Spacer after the user-banked stm (see the "Hardware problem" nop in this file)
	str		lr, [sp, SS_PC]
	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]
	mov		r5, sp								// Saved context in r5
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	ldr		r6, [r9, ACT_CPUDATAP]				// Get current cpu
	ldr		sp,	[r6, CPU_ISTACKPTR]				// Set interrupt stack
	cpsid	i, #PSR_SVC_MODE
	ldr		sp, [r9, TH_KSTACKPTR]				// Set kernel stack
	cpsid	i, #PSR_IRQ_MODE					// Back to IRQ mode; handler runs on the interrupt stack

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_save)					// Save the current VFP state to ACT_UVFP
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_user_to_kernel)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
#endif
#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #1								// r0 = 1: interrupted_userspace
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	mov 		r1, #0					// (not a PMI record)
	bl		EXT(telemetry_mark_curthread)		// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4				// ...and restore the thread pointer from TPIDRPRW
1:
#endif

	b		fleh_decirq_handler
1477
/*
 * fleh_decirq_kernel: decrementer interrupt taken while in kernel mode.
 * Saves the interrupted SVC-mode context on the kernel stack, switches
 * to the interrupt stack, then joins fleh_decirq_handler.
 */
fleh_decirq_kernel:
	cpsid	i, #PSR_SVC_MODE					// Save state in the interrupted (SVC) mode

	sub     sp, sp, EXC_CTX_SIZE
	stmia	sp, {r0-r12}
	add		r0, sp, EXC_CTX_SIZE				// r0 = pre-exception sp

	str		r0, [sp, SS_SP]						// Save supervisor mode sp
	str		lr, [sp, SS_LR]                     // Save supervisor mode lr

	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW

#if __ARM_VFP__
	add		r0, sp, SS_SIZE					// Get vfp state pointer
	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
	add		r0, VSS_ALIGN					// Get the actual vfp save area
	bl		EXT(vfp_save)					// Save the current VFP state to the stack
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	/* Switch to kernel ttb/ASID; old values are preserved in r10/r11. */
	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mrc		p15, 0, r11, c13, c0, 1				// Get CONTEXTIDR
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	mov		r5, sp								// Saved context in r5

	cpsid	i, #PSR_IRQ_MODE					// Back to IRQ mode for the banked lr/spsr

	str		lr, [r5, SS_PC]                     // Save LR as the return PC
	mrs		r4, spsr
	str		r4, [r5, SS_CPSR]                   // Save the cpsr of the interrupted mode

	ldr		sp, [r9, ACT_CPUDATAP]				// Get current cpu
	ldr		sp,	[sp, CPU_ISTACKPTR]				// Set interrupt stack

#if CONFIG_TELEMETRY
	LOAD_ADDR(r2, telemetry_needs_record)		// Check if a telemetry record was requested...
	mov		r0, #0								// r0 = 0: interrupted kernel
	ldr		r2, [r2]
	movs	r2, r2
	beq		1f
	mov		r1, #0					// (not a pmi record)
	bl		EXT(telemetry_mark_curthread)		// ...if so, mark the current thread...
	mrc		p15, 0, r9, c13, c0, 4				// ...and restore the thread pointer from TPIDRPRW
1:
#endif
1529
/*
 * fleh_decirq_handler: common decrementer dispatch.
 * On entry: r5 = saved context, r9 = current thread, sp = interrupt
 * stack.  Bumps the preemption count, records stats, and calls
 * rtclock_intr() as the second level handler.
 */
fleh_decirq_handler:
	ldr		r2, [r9, ACT_PREEMPT_CNT]           // Load preemption count
	add		r2, r2, #1							// Increment count
	str		r2, [r9, ACT_PREEMPT_CNT]			// Update preemption count
	ldr		r2, [r9, ACT_CPUDATAP]				// Get current cpu
	str		r5, [r2, CPU_INT_STATE]				// Saved context in cpu_int_state
	ldr		r3, [r2, CPU_STAT_IRQ]				// Get IRQ count
	add		r3, r3, #1							// Increment count
	str		r3, [r2, CPU_STAT_IRQ]				// Update IRQ count
	ldr		r3, [r2, CPU_STAT_IRQ_WAKE]			// Get post-wake IRQ count
	add		r3, r3, #1					// Increment count
	str		r3, [r2, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
#ifndef NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr		r4, [r4]
	movs	r4, r4								// r4 != 0 => kdebug tracing enabled (also tested after the handler)
	movne	r0, r5								// Pass saved context
	COND_EXTERN_BLNE(interrupt_trace)
#endif
	bl		EXT(interrupt_stats)                // Record interrupt statistics
	mov		r0, #0
	bl		EXT(rtclock_intr)					// Call second level exception handler
#ifndef NO_KDEBUG
	movs	r4, r4
	COND_EXTERN_BLNE(interrupt_trace_exit)
#endif

	b		return_from_irq
1558
1559
1560/*
1561 * First Level Exception Handler for FIQ
1562 * Current mode : FIQ
1563 * IRQ and FIQ are always disabled while running in FIQ handler
1564 * We do not permit nested interrupt.
1565 *
1566 * Saving area: from user   : PCB.
1567 *		from kernel : interrupt stack.
1568 *
 * FIQ mode banks 7 shadow registers (r8-r12, sp, lr) for fast service,
 * so all we have to save is just 8 general registers and LR.
 * But if the current thread was running in user mode before the FIQ
 * interrupt, all user registers must be saved for the AST handler routine.
1573 */
	.text
	.align 2
	.globl EXT(fleh_fiq_generic)

/*
 * fleh_fiq_generic: fast FIQ path using only FIQ-banked registers.
 * Expected register contract on entry (set up elsewhere):
 *   r8  = current cpu data pointer
 *   r10 = FIQ source register address, r11 = value that clears it
 *   r12 = cached decrementer value
 * Maintains the 64-bit software timebase and decrementer; only falls
 * through to fleh_dec when the decrementer goes negative.
 */
LEXT(fleh_fiq_generic)
	str		r11, [r10]							// Clear the FIQ source

	ldr		r13, [r8, CPU_TIMEBASE_LOW]			// Load TBL
	adds	r13, r13, #1						// Increment TBL
	str		r13, [r8, CPU_TIMEBASE_LOW]			// Store TBL
	ldreq	r13, [r8, CPU_TIMEBASE_HIGH]		// Load TBU (only on TBL carry: adds set Z on wrap to 0)
	addeq	r13, r13, #1						// Increment TBU
	streq	r13, [r8, CPU_TIMEBASE_HIGH]		// Store TBU
	subs	r12, r12, #1						// Decrement, DEC
	str		r12, [r8, CPU_DECREMENTER]			// Store DEC
	subspl	pc, lr, #4							// Return unless DEC < 0
	b		EXT(fleh_dec)
1591
	.text
	.align	2
	.globl	EXT(fleh_dec)
/*
 * fleh_dec: decrementer handler reached from the FIQ path.
 * Entered in FIQ mode.  Dispatches on the interrupted mode:
 *   user          -> save state in the PCB, continue at 4f (r0 = 1)
 *   kernel        -> save state on the kernel stack, continue at 4f (r0 = 0)
 *   kernel w/ IRQ
 *   masked        -> cpu_signal path at 3f
 */
LEXT(fleh_dec)
	mrs		sp, spsr							// Get the spsr
	sub		lr, lr, #4							// Rewind lr to the interrupted instruction
	tst		sp, #0x0f							// From user? or kernel?
	bne		2f

	/* From user */
	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
	add		sp, sp, ACT_PCBDATA					// Get User PCB

	stmia	sp, {r0-r12, sp, lr}^
	mov		r7, #0								// Zero the frame pointer
	nop											// Spacer after the user-banked stm (see the "Hardware problem" nop in this file)
	str		lr, [sp, SS_PC]

	mrs		r4, spsr
	str		r4, [sp, SS_CPSR]
	mov		r5, sp								// r5 = saved context (PCB)
	sub		sp, sp, ACT_PCBDATA					// Get User PCB
	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu
	ldr		sp,	[sp, CPU_ISTACKPTR]				// Set interrupt stack
	mov		r6, sp								// r6 = interrupt stack (restored as sp at 4f)
	cpsid	i, #PSR_SVC_MODE
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	ldr		sp, [r9, TH_KSTACKPTR]				// Set kernel stack

#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_save)					// Save the current VFP state to ACT_UVFP
	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
	fmxr		fpscr, r4					// And shove it into FPSCR
#endif
#if __ARM_USER_PROTECT__
	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0
	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	mrc		p15, 0, r11, c13, c0, 1				// Get CONTEXTIDR
	mov		r3, #0								// Load kernel asid
	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
	isb
#endif
	mov		r0, #1								// Mark this as coming from user context
	b		4f
1638
16392:
1640	/* From kernel */
1641	tst		sp, #PSR_IRQF						// Test for IRQ masked
1642	bne		3f									// We're on the cpu_signal path
1643
1644	cpsid   if, #PSR_SVC_MODE
1645
1646	sub     sp, sp, EXC_CTX_SIZE
1647	stmia	sp, {r0-r12}
1648	add		r0, sp, EXC_CTX_SIZE
1649
1650	str		r0, [sp, SS_SP]						// Save supervisor mode sp
1651	str		lr, [sp, SS_LR]                     // Save supervisor mode lr
1652
1653	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
1654
1655#if __ARM_VFP__
1656	add		r0, sp, SS_SIZE					// Get vfp state pointer
1657	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
1658	add		r0, VSS_ALIGN					// Get the actual vfp save area
1659	bl		EXT(vfp_save)					// Save the current VFP state to the stack
1660	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
1661	fmxr		fpscr, r4					// And shove it into FPSCR
1662#endif
1663#if __ARM_USER_PROTECT__
1664	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0
1665	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
1666	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
1667	mrc		p15, 0, r11, c13, c0, 1				// Get CONTEXTIDR
1668	mov		r3, #0								// Load kernel asid
1669	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
1670	isb
1671#endif
1672	mov		r5, sp								// Saved context in r5
1673
1674	cpsid   if, #PSR_FIQ_MODE
1675
1676	mrc     p15, 0, r1, c13, c0, 4              // Read TPIDRPRW
1677
1678	str		lr, [r5, SS_PC]                     // Save LR as the return PC
1679	mrs		r4, spsr
1680	str		r4, [r5, SS_CPSR]                   // Save the cpsr of the interrupted mode
1681
1682	ldr		r6, [r1, ACT_CPUDATAP]				// Get current cpu
1683	ldr		r6,	[r6, CPU_ISTACKPTR]				// Set interrupt stack
1684
1685	mov		r0, #0								// Mark this as coming from kernel context
1686	b       4f
1687
16883:
1689	/* cpu_signal path */
1690	mrc		p15, 0, sp, c13, c0, 4				// Read TPIDRPRW
1691	ldr		sp, [sp, ACT_CPUDATAP]				// Get current cpu
1692	ldr		sp,	[sp, CPU_FIQSTACKPTR]			// Set fiq stack
1693	sub		sp, sp, EXC_CTX_SIZE
1694	stmia		sp, {r0-r12}
1695	str		lr, [sp, SS_PC]
1696	mrs		r4, spsr
1697	str		r4, [sp, SS_CPSR]
1698	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
1699
1700#if __ARM_VFP__
1701	add		r0, sp, SS_SIZE					// Get vfp state pointer
1702	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
1703	add		r0, VSS_ALIGN					// Get the actual vfp save area
1704	bl		EXT(vfp_save)					// Save the current VFP state to the stack
1705	mov		r4, #FPSCR_DEFAULT				// Load up the default FPSCR value...
1706	fmxr		fpscr, r4					// And shove it into FPSCR
1707#endif
1708#if __ARM_USER_PROTECT__
1709	mrc		p15, 0, r10, c2, c0, 0				// Get TTBR0
1710	ldr		r3, [r9, ACT_KPTW_TTB]				// Load kernel ttb
1711	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
1712	mrc		p15, 0, r11, c13, c0, 1				// Get CONTEXTIDR
1713	mov		r3, #0								// Load kernel asid
1714	mcr		p15, 0, r3, c13, c0, 1				// Set CONTEXTIDR
1715	isb
1716#endif
1717
1718	ALIGN_STACK r0, r1
1719	mov		r0, r8								// Get current cpu in arg 0
1720	mov		r1, SIGPdec							// Decrementer signal in arg1
1721	mov		r2, #0
1722	mov		r3, #0
1723	bl		EXT(cpu_signal)						// Call cpu_signal
1724	UNALIGN_STACK
1725
1726	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
1727
1728#if __ARM_VFP__
1729	add		r0, sp, SS_SIZE					// Get vfp state pointer
1730	bic		r0, #(VSS_ALIGN_NUM - 1)			// Align to arm_vfpsaved_state alignment
1731	add		r0, VSS_ALIGN					// Get the actual vfp save area
1732	bl		EXT(vfp_load)					// Load the desired VFP state from the stack
1733#endif
1734
1735	clrex										// clear exclusive memory tag
1736	sev
1737#if __ARM_USER_PROTECT__
1738	mcr		p15, 0, r10, c2, c0, 0				// Set TTBR0
1739	mcr		p15, 0, r11, c13, c0, 1				// Set CONTEXTIDR
1740	isb
1741#endif
1742	ldr		lr, [sp, SS_PC]
1743	ldmia	sp, {r0-r12}						// Restore saved registers
1744	movs	pc, lr								// Return from fiq
1745
17464:
1747	cpsid	i, #PSR_IRQ_MODE
1748	cpsie	f
1749	mov		sp, r6								// Restore the stack pointer
1750	ALIGN_STACK r2, r3
1751	msr		spsr_cxsf, r4						// Restore the spsr
1752	ldr		r2, [r9, ACT_PREEMPT_CNT]           // Load preemption count
1753	add		r2, r2, #1							// Increment count
1754	str		r2, [r9, ACT_PREEMPT_CNT]			// Update preemption count
1755	ldr		r4, [r9, ACT_CPUDATAP]				// Get current cpu
1756	str		r5, [r4, CPU_INT_STATE]
1757	ldr		r3, [r4, CPU_STAT_IRQ]				// Get IRQ count
1758	add		r3, r3, #1							// Increment count
1759	str		r3, [r4, CPU_STAT_IRQ]				// Update IRQ count
1760	ldr		r3, [r4, CPU_STAT_IRQ_WAKE]			// Get post-wake IRQ count
1761	add		r3, r3, #1					// Increment count
1762	str		r3, [r4, CPU_STAT_IRQ_WAKE]			// Update post-wake IRQ count
1763#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
1764	movs	r0, r0
1765	beq		5f
1766	mov	r8, r0							// Stash our "from_user" boolean value
1767	bl		EXT(timer_state_event_user_to_kernel)
1768	mov	r0, r8							// Restore our "from_user" value
1769	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
17705:
1771#endif
1772#if CONFIG_TELEMETRY
1773	LOAD_ADDR(r4, telemetry_needs_record)		// Check if a telemetry record was requested...
1774	ldr		r4, [r4]
1775	movs		r4, r4
1776	beq		6f
1777	mov		r1, #0					// (not a PMI record)
1778	bl		EXT(telemetry_mark_curthread)		// ...if so, mark the current thread...
1779	mrc		p15, 0, r9, c13, c0, 4				// ...and restore the thread pointer from TPIDRPRW
17806:
1781#endif
1782
1783#ifndef NO_KDEBUG
1784	LOAD_ADDR(r4, kdebug_enable)
1785	ldr     r4, [r4]
1786	movs    r4, r4
1787	ldrne	r1, [r9, ACT_CPUDATAP]				// Get current cpu
1788	ldrne	r0, [r1, CPU_INT_STATE]
1789	COND_EXTERN_BLNE(interrupt_trace)
1790#endif
1791	bl		EXT(interrupt_stats)                // Record interrupt statistics
1792	mov		r0, #0
1793	bl		EXT(rtclock_intr)					// Call second level exception handler
1794#ifndef NO_KDEBUG
1795	movs	r4, r4
1796	COND_EXTERN_BLNE(interrupt_trace_exit)
1797#endif
1798	UNALIGN_STACK
1799
1800	b       return_from_irq
1801
1802/*
1803 * void thread_syscall_return(kern_return_t r0)
1804 *
1805 */
	.text
	.align 2
	.globl EXT(thread_syscall_return)

/*
 * thread_syscall_return: store r0 as the syscall return value in the
 * thread's saved state, emit the kdebug syscall-exit trace for Mach
 * (negative-numbered) traps if tracing is enabled, and return to user.
 */
LEXT(thread_syscall_return)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	add		r1, r9, ACT_PCBDATA					// Get User PCB
	str		r0, [r1, SS_R0]						// set return value
#ifndef	NO_KDEBUG
	LOAD_ADDR(r4, kdebug_enable)
	ldr		r4, [r4]
	movs	r4, r4
	beq		load_and_go_user
	ldr		r12, [r1, SS_R12]					// Load syscall number
	rsbs	r1, r12, #0							// make the syscall positive (if negative)
	COND_EXTERN_BLGT(mach_syscall_trace_exit)
#endif
	b		load_and_go_user
1824
1825/*
1826 * void thread_exception_return(void)
1827 * void thread_bootstrap_return(void)
1828 *
1829 */
	.text
	.globl EXT(thread_exception_return)
	.globl EXT(thread_bootstrap_return)

/*
 * thread_bootstrap_return / thread_exception_return: entry points that
 * fall into load_and_go_user, the common "return to user space" path.
 */
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
	bl EXT(dtrace_thread_bootstrap)
#endif
	// Fall through

LEXT(thread_exception_return)

load_and_go_user:
/*
 * Restore user mode states and go back to user mode
 */
	cpsid	i									// Disable irq
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW

	mvn		r0, #0								// r0 = -1 (THROTTLE_LEVEL_NONE)
	str		r0, [r9, TH_IOTIER_OVERRIDE]			// Reset IO tier override to -1 before returning to user

	/* Loop handling ASTs until none remain pending. */
	ldr		r8, [r9, ACT_CPUDATAP]				// Get current cpu
	ldr		r5, [r8, CPU_PENDING_AST]			// Get ASTs
	cmp		r5, #0								// Test if ASTs pending
	beq		return_to_user_now					// Branch if no ASTs

	bl		EXT(ast_taken_user)					// Handle all ASTs (may continue via thread_exception_return)

	mrc		p15, 0, r9, c13, c0, 4				// Reload r9 from TPIDRPRW
	b	load_and_go_user						// Loop back
1861
/*
 * return_to_user_now: final restore of the user context from the PCB.
 * IRQ is disabled; r9 = current thread.  Verifies the saved cpsr is a
 * user-mode cpsr (panics otherwise), restores VFP/debug state and the
 * user translation table, then returns with movs pc, lr.
 */
return_to_user_now:

#if MACH_ASSERT
/*
 * Assert that the preemption level is zero prior to the return to user space
 */
	ldr		r1, [r9, ACT_PREEMPT_CNT]			// Load preemption count
	cmp		r1, #0						// Test
	bne		L_lagu_preempt_panic				// Panic if not zero
#endif

#if	!CONFIG_SKIP_PRECISE_USER_KERNEL_TIME
	bl		EXT(timer_state_event_kernel_to_user)
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
	ldr		r8, [r9, ACT_CPUDATAP]				// Get current cpu data
#endif	/* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME */
#if __ARM_DEBUG__ >= 6
	ldr		r0, [r9, ACT_DEBUGDATA]
	ldr		r6, [r8, CPU_USER_DEBUG]
	cmp		r0, r6								// test if debug registers need to be changed
	beq		1f
	bl		EXT(arm_debug_set)					// argument is already in r0
	mrc		p15, 0, r9, c13, c0, 4				// Read TPIDRPRW
1:
#endif
#if __ARM_VFP__
	add		r0, r9, ACT_UVFP				// Get the address of the user VFP save area
	bl		EXT(vfp_load)					// Load the desired VFP state from ACT_UVFP
#endif
	add		r0, r9, ACT_PCBDATA					// Get User PCB
	ldr		r4, [r0, SS_CPSR]					// Get saved cpsr
	and		r3, r4, #PSR_MODE_MASK				// Extract current mode
	cmp		r3, #PSR_USER_MODE					// Check user mode
	movne	r0, r3								// Pass the illegal mode to the panic handler
	bne		EXT(ExceptionVectorPanic)

	msr		spsr_cxsf, r4						// Restore spsr(user mode cpsr)
	mov		sp, r0								// Get User PCB

	clrex										// clear exclusive memory tag
	sev											// Signal event to any WFE waiters
#if __ARM_USER_PROTECT__
	/* Switch back to the user translation table and ASID. */
	ldr     r3, [r9, ACT_UPTW_TTB]              // Load thread ttb
	mcr		p15, 0, r3, c2, c0, 0				// Set TTBR0
	ldr		r2, [r9, ACT_ASID]					// Load thread asid
	mcr		p15, 0, r2, c13, c0, 1
	isb
#endif
	ldr		lr, [sp, SS_PC]						// Restore user mode pc
	ldmia	sp, {r0-r12, sp, lr}^				// Restore the other user mode registers
	nop											// Hardware problem
	movs	pc, lr								// Return to user

#if MACH_ASSERT
/*
 * r1: current preemption count
 * r9: current_thread()
 */
L_lagu_preempt_panic:
	adr		r0, L_lagu_preempt_panic_str			// Load the panic string...
	blx		EXT(panic)					// Finally, panic

	.align  2
L_lagu_preempt_panic_str:
	.asciz  "load_and_go_user: preemption_level %d"
	.align  2

#endif /* MACH_ASSERT */

	.align  2
L_evimpanic_str:
	.ascii  "Exception Vector: Illegal Mode: 0x%08X\n\000"
	.align  2
1935
	.text
	.align 2
	.globl EXT(ExceptionVectorPanic)

/*
 * ExceptionVectorPanic: called with r0 = the illegal mode bits found in
 * a saved cpsr on the return-to-user path.  Panics, never returns.
 */
LEXT(ExceptionVectorPanic)
	cpsid i, #PSR_SVC_MODE
	ALIGN_STACK r1, r2
	mov		r1, r0								// Mode value becomes the panic format argument
	adr		r0, L_evimpanic_str
	blx		EXT(panic)
	b		.									// panic() should not return; spin if it does
1947
#include	"globals_asm.h"

// Emit address literals for symbols referenced via LOAD_ADDR()
// (presumably consumed by code outside this view — see globals_asm.h).
LOAD_ADDR_GEN_DEF(mach_trap_table)
LOAD_ADDR_GEN_DEF(kern_invalid)
1953/* vim: set ts=4: */
1954