xref: /xnu-8019.80.24/osfmk/arm64/cswitch.s (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <machine/asm.h>
29#include <arm64/machine_machdep.h>
30#include <arm64/machine_routines_asm.h>
31#include <arm64/pac_asm.h>
32#include <arm64/proc_reg.h>
33#include "assym.s"
34
35/*
36 * save_general_registers
37 *
38 * Saves variable registers to kernel PCB.
39 *   arg0 - thread_kernel_state pointer
40 *   arg1 - Scratch register
41 */
42
43.macro	save_general_registers
44/* AAPCS-64 Page 14
45 *
46 * A subroutine invocation must preserve the contents of the registers r19-r29
47 * and SP.
48 */
49#if __has_feature(ptrauth_calls)
	/* Sign LR (with SP as the modifier) before it is spilled to memory below;
	 * cswitch_epilogue authenticates it with retaa on resume. */
50	paciasp
51#endif
52	stp		x19, x20, [$0, SS64_KERNEL_X19]
53	stp		x21, x22, [$0, SS64_KERNEL_X21]
54	stp		x23, x24, [$0, SS64_KERNEL_X23]
55	stp		x25, x26, [$0, SS64_KERNEL_X25]
56	stp		x27, x28, [$0, SS64_KERNEL_X27]
57	stp		fp, lr, [$0, SS64_KERNEL_FP]
	/* The saved-PC slot is deliberately zeroed: resumption happens through the
	 * saved LR (load_general_registers + cswitch_epilogue), not this field. */
58	str		xzr, [$0, SS64_KERNEL_PC]
59	mov		x$1, sp
60	str		x$1, [$0, SS64_KERNEL_SP]
61
62/* AAPCS-64 Page 14
63 *
64 * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
65 * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
66 * (or should be preserved by the caller).
67 */
68	str		d8,	[$0, NS64_KERNEL_D8]
69	str		d9,	[$0, NS64_KERNEL_D9]
70	str		d10,[$0, NS64_KERNEL_D10]
71	str		d11,[$0, NS64_KERNEL_D11]
72	str		d12,[$0, NS64_KERNEL_D12]
73	str		d13,[$0, NS64_KERNEL_D13]
74	str		d14,[$0, NS64_KERNEL_D14]
75	str		d15,[$0, NS64_KERNEL_D15]
76
	/* Save FPCR as a 32-bit word (w$1); the defined FPCR bits fit in the
	 * low 32 bits and load_general_registers restores it with an ldr w$1. */
77	mrs		x$1, FPCR
78	str		w$1, [$0, NS64_KERNEL_FPCR]
79.endmacro
80
81/*
82 * load_general_registers
83 *
84 * Loads variable registers from kernel PCB.
85 *   arg0 - thread_kernel_state pointer
86 *   arg1 - Scratch register
87 */
88.macro	load_general_registers
	/* Restore FPCR first.  CMSR (machine_routines_asm.h) compares the current
	 * FPCR against the saved value and writes it only when they differ,
	 * branching to local label "1:" below when no write is needed.  x19 is
	 * safe as a scratch register here because it is reloaded from the PCB
	 * immediately afterwards. */
89	ldr		w$1, [$0, NS64_KERNEL_FPCR]
90	mrs		x19, FPCR
91	CMSR FPCR, x19, x$1, 1
921:
93
94	ldp		x19, x20, [$0, SS64_KERNEL_X19]
95	ldp		x21, x22, [$0, SS64_KERNEL_X21]
96	ldp		x23, x24, [$0, SS64_KERNEL_X23]
97	ldp		x25, x26, [$0, SS64_KERNEL_X25]
98	ldp		x27, x28, [$0, SS64_KERNEL_X27]
99	ldp		fp, lr, [$0, SS64_KERNEL_FP]
100	ldr		x$1, [$0, SS64_KERNEL_SP]
101	mov		sp, x$1
102
103	ldr		d8,	[$0, NS64_KERNEL_D8]
104	ldr		d9,	[$0, NS64_KERNEL_D9]
105	ldr		d10,[$0, NS64_KERNEL_D10]
106	ldr		d11,[$0, NS64_KERNEL_D11]
107	ldr		d12,[$0, NS64_KERNEL_D12]
108	ldr		d13,[$0, NS64_KERNEL_D13]
109	ldr		d14,[$0, NS64_KERNEL_D14]
110	ldr		d15,[$0, NS64_KERNEL_D15]
111.endmacro
112
113/*
114 * cswitch_epilogue
115 *
116 * Returns to the address reloaded into LR, authenticating if needed.
117 */
118.macro	cswitch_epilogue
119#if __has_feature(ptrauth_calls)
	/* LR was signed by paciasp in save_general_registers; retaa authenticates
	 * it against SP and returns in a single instruction.  SP must therefore
	 * match its value at the paciasp (load_general_registers restores it). */
120	retaa
121#else
122	ret
123#endif
124.endm
125
126
127/*
128 * set_thread_registers
129 *
130 * Updates thread registers during context switch
131 *  arg0 - New thread pointer
132 *  arg1 - Scratch register
133 *  arg2 - Scratch register
134 */
135.macro	set_thread_registers
136	msr		TPIDR_EL1, $0						// Write new thread pointer to TPIDR_EL1
137	ldr		$1, [$0, ACT_CPUDATAP]
138	str		$0, [$1, CPU_ACTIVE_THREAD]			// cpu_data->cpu_active_thread = new thread
139
	// Mirror the CPU number into TPIDR_EL0; ldrsh sign-extends the
	// halfword-sized CPU_NUMBER_GS field to the full register.
140	ldrsh	$2, [$1, CPU_NUMBER_GS]
141	msr		TPIDR_EL0, $2
142
143	ldr		$1, [$0, TH_CTH_SELF]				// Get cthread pointer
144	msr		TPIDRRO_EL0, $1						// TPIDRRO_EL0 is read-only at EL0
145
146#if DEBUG || DEVELOPMENT
147	ldr		$1, [$0, TH_THREAD_ID]				// Save the bottom 32-bits of the thread ID into
148	msr		CONTEXTIDR_EL1, $1					// CONTEXTIDR_EL1 (top 32-bits are RES0).
149#endif /* DEBUG || DEVELOPMENT */
150.endmacro
151
152#define CSWITCH_ROP_KEYS	(HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
153#define CSWITCH_JOP_KEYS	(HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
154
155/*
156 * set_process_dependent_keys_and_sync_context
157 *
158 * Updates process dependent keys and issues explicit context sync during context switch if necessary
159 *  Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
160 *  and in cpu_data_init for slave processors
161 *
162 *  thread - New thread pointer
163 *  new_key - Scratch register: New Thread Key
164 *  tmp_key - Scratch register: Current CPU Key
165 *  cpudatap - Scratch register: Current CPU Data pointer
166 *  wsync - Half-width scratch register: CPU sync required flag
167 *
168 *  to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
169 *  we just use wsync to keep track of needing an ISB
170 */
171.macro set_process_dependent_keys_and_sync_context	thread, new_key, tmp_key, cpudatap, wsync
172
173
174#if defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC)
175	ldr		\cpudatap, [\thread, ACT_CPUDATAP]
176#endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */
177
178#if defined(__ARM_ARCH_8_5__)
	/* ARMv8.5: seed wsync from the cached per-CPU "sync on cswitch" flag. */
179	ldrb	\wsync, [\cpudatap, CPU_SYNC_ON_CSWITCH]
180#else /* defined(__ARM_ARCH_8_5__) */
181	mov		\wsync, #0
182#endif
183
184#if CSWITCH_ROP_KEYS
185	ldr		\new_key, [\thread, TH_ROP_PID]
	/* REPROGRAM_ROP_KEYS (pac_asm.h) branches to the skip label when no key
	 * reprogramming is needed, leaving wsync unchanged in that case. */
186	REPROGRAM_ROP_KEYS	Lskip_rop_keys_\@, \new_key, \cpudatap, \tmp_key
187#if HAS_PARAVIRTUALIZED_PAC
188	/* xnu hypervisor guarantees context synchronization during guest re-entry */
189	mov		\wsync, #0
190#else
191	mov		\wsync, #1
192#endif
193Lskip_rop_keys_\@:
194#endif /* CSWITCH_ROP_KEYS */
195
196#if CSWITCH_JOP_KEYS
197	ldr		\new_key, [\thread, TH_JOP_PID]
198	REPROGRAM_JOP_KEYS	Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
199#if HAS_PARAVIRTUALIZED_PAC
200	mov		\wsync, #0
201#else
202	mov		\wsync, #1
203#endif
204Lskip_jop_keys_\@:
205#endif /* CSWITCH_JOP_KEYS */
206
	/* Synchronize context only if anything above demanded it. */
207	cbz		\wsync, 1f
208	isb 	sy
209
210#if HAS_PARAVIRTUALIZED_PAC
	/* Note: with HAS_PARAVIRTUALIZED_PAC this macro contains two "1:" local
	 * labels; "1f" above resolves to this nearer one, so the flag-clearing
	 * strb below runs even when the isb is skipped. */
2111:	/* guests need to clear the sync flag even after skipping the isb, in case they synced via hvc instead */
212#endif
213#if defined(__ARM_ARCH_8_5__)
214	strb	wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]
215#endif
2161:
217.endmacro
218
219/*
220 * void     machine_load_context(thread_t        thread)
221 *
222 * Load the context for the first thread to run on a
223 * cpu, and go.
224 */
225	.text
226	.align 2
227	.globl	EXT(machine_load_context)
228
	// Loads the first thread's saved state onto this CPU and returns into it.
	// x0 = thread to run.  Never returns to the caller.
229LEXT(machine_load_context)
230	set_thread_registers 	x0, x1, x2
231	ldr		x1, [x0, TH_KSTACKPTR]				// Get top of kernel stack
232	load_general_registers 	x1, 2				// "2" names the scratch register (x2/w2)
233	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
234	mov		x0, #0								// Clear argument to thread_continue
235	cswitch_epilogue							// Return via the thread's reloaded LR
236
237/*
238 *  typedef void (*thread_continue_t)(void *param, wait_result_t)
239 *
240 *	void Call_continuation( thread_continue_t continuation,
241 *	            			void *param,
242 *				            wait_result_t wresult,
243 *                          bool enable_interrupts)
244 */
245	.text
246	.align	5
247	.globl	EXT(Call_continuation)
248
	// Runs a thread continuation on a fresh kernel stack.
	// x0 = continuation, x1 = its parameter, x2 = wait result,
	// x3 = nonzero to enable interrupts first.  Does not return; the thread
	// is terminated if the continuation ever comes back.
249LEXT(Call_continuation)
250	mrs		x4, TPIDR_EL1						// Get the current thread pointer
251
252	/* ARM64_TODO arm loads the kstack top instead of arg4. What should we use? */
253	ldr		x5, [x4, TH_KSTACKPTR]				// Get the top of the kernel stack
254	mov		sp, x5								// Set stack pointer
255	mov		fp, #0								// Clear the frame pointer
256
257	set_process_dependent_keys_and_sync_context	x4, x5, x6, x7, w20
258
	// Stash the arguments in callee-saved registers so they survive the
	// ml_set_interrupts_enabled call below.
259	mov x20, x0  //continuation
260	mov x21, x1  //continuation parameter
261	mov x22, x2  //wait result
262
263	cbz x3, 1f									// Skip if interrupts should stay off
264	mov x0, #1
265	bl EXT(ml_set_interrupts_enabled)
2661:
267
268	mov		x0, x21								// Set the first parameter
269	mov		x1, x22								// Set the wait result arg
270#ifdef HAS_APPLE_PAC
	// Authenticated indirect call: the continuation pointer is signed with
	// the thread_continue_t discriminator.
271	mov		x21, THREAD_CONTINUE_T_DISC
272	blraa	x20, x21							// Branch to the continuation
273#else
274	blr		x20									// Branch to the continuation
275#endif
	// Continuations are not expected to return; if one does, kill the thread.
276	mrs		x0, TPIDR_EL1						// Get the current thread pointer
277	b		EXT(thread_terminate)				// Kill the thread
278
279
280/*
281 *	thread_t Switch_context(thread_t	old,
282 * 				void		(*cont)(void),
283 *				thread_t	new)
284 */
285	.text
286	.align 5
287	.globl	EXT(Switch_context)
288
	// x0 = old thread, x1 = continuation, x2 = new thread.
	// x0 is never written below, so the old thread is also the return value
	// (delivered in the new thread's context).
289LEXT(Switch_context)
290	cbnz	x1, Lswitch_threads					// Skip saving old state if blocking on continuation
291	ldr		x3, [x0, TH_KSTACKPTR]				// Get the old kernel stack top
292	save_general_registers	x3, 4
293Lswitch_threads:
294	set_thread_registers	x2, x3, x4
295	ldr		x3, [x2, TH_KSTACKPTR]				// Get the new kernel stack top
296	load_general_registers	x3, 4
297	set_process_dependent_keys_and_sync_context	x2, x3, x4, x5, w6
298	cswitch_epilogue							// Resume the new thread at its saved LR
299
300/*
301 *	thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
302 *
303 */
304	.text
305	.align 5
306	.globl	EXT(Shutdown_context)
307
	// Saves the current thread's state, then runs cpu_doshutdown on the
	// per-CPU interrupt stack with IRQ/FIQ masked.  x0/x1 (doshutdown,
	// processor) pass through untouched to cpu_doshutdown.
308LEXT(Shutdown_context)
309	mrs		x10, TPIDR_EL1							// Get thread pointer
310	ldr		x11, [x10, TH_KSTACKPTR]				// Get the top of the kernel stack
311	save_general_registers	x11, 12
312	msr		DAIFSet, #(DAIFSC_FIQF | DAIFSC_IRQF)	// Disable interrupts
313	ldr		x11, [x10, ACT_CPUDATAP]				// Get current cpu
314	ldr		x12, [x11, CPU_ISTACKPTR]				// Switch to interrupt stack
315	mov		sp, x12
316	b		EXT(cpu_doshutdown)						// Tail call; does not return here
317
318/*
319 *	thread_t Idle_context(void)
320 *
321 */
322	.text
323	.align 5
324	.globl	EXT(Idle_context)
325
	// Saves the current thread's state and enters cpu_idle on the per-CPU
	// interrupt stack.  The saved state is resumed later via Idle_load_context.
326LEXT(Idle_context)
327	mrs		x0, TPIDR_EL1						// Get thread pointer
328	ldr		x1, [x0, TH_KSTACKPTR]				// Get the top of the kernel stack
329	save_general_registers	x1, 2
330	ldr		x1, [x0, ACT_CPUDATAP]				// Get current cpu
331	ldr		x2, [x1, CPU_ISTACKPTR]				// Switch to interrupt stack
332	mov		sp, x2
333	b		EXT(cpu_idle)						// Tail call; resumes via Idle_load_context
334
335/*
336 *	thread_t Idle_load_context(void)
337 *
338 */
339	.text
340	.align 5
341	.globl	EXT(Idle_load_context)
342
	// Counterpart to Idle_context: reloads the current thread's saved
	// callee-saved state from its kernel stack and returns into it.
343LEXT(Idle_load_context)
344	mrs		x0, TPIDR_EL1						// Get thread pointer
345	ldr		x1, [x0, TH_KSTACKPTR]				// Get the top of the kernel stack
346	load_general_registers	x1, 2
347	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
348	cswitch_epilogue							// Return via the reloaded LR
349
350	.align	2
351	.globl	EXT(machine_set_current_thread)
	// void machine_set_current_thread(thread_t): update the per-CPU thread
	// registers (TPIDR_EL1 et al.) only; no stack or register-state switch.
352LEXT(machine_set_current_thread)
353	set_thread_registers x0, x1, x2
354	ret
355
356
357/* vim: set ts=4: */
358