xref: /xnu-11215.61.5/osfmk/arm64/cswitch.s (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1/*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28#include <machine/asm.h>
29#include <arm64/machine_machdep.h>
30#include <arm64/machine_routines_asm.h>
31#include <arm64/pac_asm.h>
32#include <arm64/proc_reg.h>
33#include "assym.s"
34
35/*
36 * save_general_registers
37 *
38 * Saves variable registers to kernel PCB.
39 *   arg0 - thread_kernel_state pointer
40 *   arg1 - Scratch register
41 */
42
.macro	save_general_registers
/* AAPCS-64 Page 14
 *
 * A subroutine invocation must preserve the contents of the registers r19-r29
 * and SP.
 */
#if __has_feature(ptrauth_calls)
	paciasp										// Sign LR (SP as diversifier) before it is spilled to memory
#endif
	stp		x19, x20, [$0, SS64_KERNEL_X19]		// Spill the AAPCS64 callee-saved GPR pairs
	stp		x21, x22, [$0, SS64_KERNEL_X21]
	stp		x23, x24, [$0, SS64_KERNEL_X23]
	stp		x25, x26, [$0, SS64_KERNEL_X25]
	stp		x27, x28, [$0, SS64_KERNEL_X27]
	stp		fp, lr, [$0, SS64_KERNEL_FP]
	strb	wzr, [$0, SS64_KERNEL_PC_WAS_IN_USER]	// Mark this save as made from kernel mode
	mov		x$1, sp								// SP cannot be stored directly; stage it in scratch x$1
	str		x$1, [$0, SS64_KERNEL_SP]
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* AVP: SSBS support is discovered at runtime; skip the save when absent */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif
	mrs		x$1, SSBS							// Save PSTATE.SSBS, shifted down to a single byte
	lsr     x$1, x$1, #0 + PSR64_SSBS_SHIFT_64
	strb	w$1, [$0, SS64_KERNEL_SSBS]
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_4__
	mrs		x$1, DIT							// Save PSTATE.DIT, shifted down to a single byte
	lsr     x$1, x$1, #0 + PSR64_DIT_SHIFT
	strb	w$1, [$0, SS64_KERNEL_DIT]
#endif //__ARM_ARCH_8_4__
#if __ARM_ARCH_8_2__
	mrs		x$1, UAO							// Save PSTATE.UAO, shifted down to a single byte
	lsr     x$1, x$1, #0 + PSR64_UAO_SHIFT
	strb	w$1, [$0, SS64_KERNEL_UAO]
#endif //__ARM_ARCH_8_2__

/* AAPCS-64 Page 14
 *
 * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
 * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
 * (or should be preserved by the caller).
 */
	str		d8,	[$0, NS64_KERNEL_D8]
	str		d9,	[$0, NS64_KERNEL_D9]
	str		d10,[$0, NS64_KERNEL_D10]
	str		d11,[$0, NS64_KERNEL_D11]
	str		d12,[$0, NS64_KERNEL_D12]
	str		d13,[$0, NS64_KERNEL_D13]
	str		d14,[$0, NS64_KERNEL_D14]
	str		d15,[$0, NS64_KERNEL_D15]

	mrs		x$1, FPCR							// FPCR state travels with the thread; only the
	str		w$1, [$0, NS64_KERNEL_FPCR]			// low 32 bits are stored
.endmacro
101
102/*
103 * load_general_registers
104 *
105 * Loads variable registers from kernel PCB.
106 *   arg0 - thread_kernel_state pointer
107 *   arg1 - Scratch register
108 */
.macro	load_general_registers
	ldr		w$1, [$0, NS64_KERNEL_FPCR]
	mrs		x19, FPCR							// x19 is safe scratch here: it is reloaded from the PCB below
	CMSR FPCR, x19, x$1, 1						// Conditional MSR: write FPCR only if the new value differs,
1:												// otherwise branch to local label 1 (skip the write)

	ldp		x19, x20, [$0, SS64_KERNEL_X19]		// Reload the AAPCS64 callee-saved GPR pairs
	ldp		x21, x22, [$0, SS64_KERNEL_X21]
	ldp		x23, x24, [$0, SS64_KERNEL_X23]
	ldp		x25, x26, [$0, SS64_KERNEL_X25]
	ldp		x27, x28, [$0, SS64_KERNEL_X27]
	ldp		fp, lr, [$0, SS64_KERNEL_FP]
	ldr		x$1, [$0, SS64_KERNEL_SP]			// SP cannot be loaded directly; stage via scratch x$1
	mov		sp, x$1
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* AVP: SSBS support is discovered at runtime; skip the restore when absent */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif // APPLEVIRTUALPLATFORM
	ldrb	w$1, [$0, SS64_KERNEL_SSBS]			// Restore PSTATE.SSBS from its saved byte
	lsl     x$1, x$1, #0 + PSR64_SSBS_SHIFT_64
	msr		SSBS, x$1
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_2__
	ldrb	w$1, [$0, SS64_KERNEL_UAO]			// Restore PSTATE.UAO from its saved byte
	lsl     x$1, x$1, #0 + PSR64_UAO_SHIFT
	msr		UAO, x$1
#endif //__ARM_ARCH_8_2__
#if __ARM_ARCH_8_4__
	ldrb	w$1, [$0, SS64_KERNEL_DIT]			// Restore PSTATE.DIT from its saved byte
	lsl     x$1, x$1, #0 + PSR64_DIT_SHIFT
	msr		DIT, x$1
#endif //__ARM_ARCH_8_4__

	ldr		d8,	[$0, NS64_KERNEL_D8]			// Reload the callee-saved low halves of v8-v15
	ldr		d9,	[$0, NS64_KERNEL_D9]
	ldr		d10,[$0, NS64_KERNEL_D10]
	ldr		d11,[$0, NS64_KERNEL_D11]
	ldr		d12,[$0, NS64_KERNEL_D12]
	ldr		d13,[$0, NS64_KERNEL_D13]
	ldr		d14,[$0, NS64_KERNEL_D14]
	ldr		d15,[$0, NS64_KERNEL_D15]
.endmacro
154
155/*
156 * cswitch_epilogue
157 *
158 * Returns to the address reloaded into LR, authenticating if needed.
159 */
.macro	cswitch_epilogue
#if __has_feature(ptrauth_calls)
	retaa								// Authenticate LR (signed by paciasp in save_general_registers)
										// with the A-key/SP before returning
#else
	ret
#endif
.endm
167
168
169/*
170 * set_thread_registers
171 *
172 * Updates thread registers during context switch
173 *  arg0 - New thread pointer
174 *  arg1 - Scratch register
175 *  arg2 - Scratch register
176 */
.macro	set_thread_registers
	msr		TPIDR_EL1, $0						// Write new thread pointer to TPIDR_EL1
	ldr		$1, [$0, ACT_CPUDATAP]				// $1 = new thread's cpu_data pointer
	str		$0, [$1, CPU_ACTIVE_THREAD]			// Publish the new active thread on this CPU

	ldr	    $2, [$1, CPU_TPIDR_EL0]             // Write encoded CPU info to TPIDR_EL0
	msr		TPIDR_EL0, $2

	ldr		$1, [$0, TH_CTH_SELF]				// Get cthread pointer
	msr		TPIDRRO_EL0, $1						// Expose it (read-only) to userspace via TPIDRRO_EL0

	ldr		$1, [$0, TH_THREAD_ID]				// Save the bottom 32-bits of the thread ID into
	msr		CONTEXTIDR_EL1, $1					// CONTEXTIDR_EL1 (top 32-bits are RES0).
.endmacro
191
192#define CSWITCH_ROP_KEYS	(HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
193#define CSWITCH_JOP_KEYS	(HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
194
195/*
196 * set_process_dependent_keys_and_sync_context
197 *
198 * Updates process dependent keys and issues explicit context sync during context switch if necessary
199 *  Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
200 *  and in cpu_data_init for slave processors
201 *
202 *  thread - New thread pointer
203 *  new_key - Scratch register: New Thread Key
204 *  tmp_key - Scratch register: Current CPU Key
205 *  cpudatap - Scratch register: Current CPU Data pointer
206 *  wsync - Half-width scratch register: CPU sync required flag
207 *
208 *  to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
209 *  we just use wsync to keep track of needing an ISB
210 */
.macro set_process_dependent_keys_and_sync_context	thread, new_key, tmp_key, cpudatap, wsync


#if defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC)
	ldr		\cpudatap, [\thread, ACT_CPUDATAP]
#endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */

#if defined(__ARM_ARCH_8_5__)
	ldrb	\wsync, [\cpudatap, CPU_SYNC_ON_CSWITCH]	// Seed wsync with any sync already pending on this CPU
#else /* defined(__ARM_ARCH_8_5__) */
	mov		\wsync, #0
#endif

#if CSWITCH_ROP_KEYS
	ldr		\new_key, [\thread, TH_ROP_PID]
	REPROGRAM_ROP_KEYS	Lskip_rop_keys_\@, \new_key, \cpudatap, \tmp_key	// Branches to Lskip... if no reprogram needed
#if HAS_PARAVIRTUALIZED_PAC
	/* xnu hypervisor guarantees context synchronization during guest re-entry */
	mov		\wsync, #0
#else
	mov		\wsync, #1							// ROP key changed: an ISB is required before use
#endif
Lskip_rop_keys_\@:
#endif /* CSWITCH_ROP_KEYS */

#if CSWITCH_JOP_KEYS
	ldr		\new_key, [\thread, TH_JOP_PID]
	REPROGRAM_JOP_KEYS	Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	mov		\wsync, #0							// See ROP case: hypervisor syncs on guest re-entry
#else
	mov		\wsync, #1							// JOP key changed: an ISB is required before use
#endif
Lskip_jop_keys_\@:
#endif /* CSWITCH_JOP_KEYS */

	/*
	 * NOTE: "1f" binds to the *nearest following* "1:".  Under
	 * HAS_PARAVIRTUALIZED_PAC that is the label placed before the
	 * flag-clearing strb, so guests clear CPU_SYNC_ON_CSWITCH even when the
	 * isb is skipped.  Otherwise the branch also skips the strb, which is
	 * safe: in that configuration wsync == 0 implies the flag byte read
	 * above was already zero.
	 */
	cbz		\wsync, 1f
	isb 	sy									// Context synchronization so reprogrammed keys take effect

#if HAS_PARAVIRTUALIZED_PAC
1:	/* guests need to clear the sync flag even after skipping the isb, in case they synced via hvc instead */
#endif
#if defined(__ARM_ARCH_8_5__)
	strb	wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]	// Acknowledge: no sync pending anymore
#endif
1:
.endmacro
258
259/*
260 * void     machine_load_context(thread_t        thread)
261 *
262 * Load the context for the first thread to run on a
263 * cpu, and go.
264 */
	.text
	.align 2
	.globl	EXT(machine_load_context)

LEXT(machine_load_context)
	ARM64_PROLOG
	set_thread_registers 	x0, x1, x2			// Install x0 as the current thread (TPIDR_EL1 et al.)
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2	// Get top of kernel stack
	load_general_registers 	x1, 2				// Restore callee-saved state, incl. SP/FP/LR
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	mov		x0, #0								// Clear argument to thread_continue
	cswitch_epilogue							// "Return" into the thread's restored LR; does not come back
277
278/*
279 *  typedef void (*thread_continue_t)(void *param, wait_result_t)
280 *
281 *	void Call_continuation( thread_continue_t continuation,
282 *	            			void *param,
283 *				            wait_result_t wresult,
284 *                          bool enable interrupts)
285 */
	.text
	.align	5
	.globl	EXT(Call_continuation)

LEXT(Call_continuation)
	ARM64_PROLOG
	mrs		x4, TPIDR_EL1						// Get the current thread pointer

	/* ARM64_TODO arm loads the kstack top instead of arg4. What should we use? */
	LOAD_KERN_STACK_TOP	dst=x5, src=x4, tmp=x6
	mov		sp, x5								// Set stack pointer
	mov		fp, #0								// Clear the frame pointer (continuation starts a fresh backtrace)

	set_process_dependent_keys_and_sync_context	x4, x5, x6, x7, w20

	/* Stash the arguments in callee-saved registers so they survive the
	 * ml_set_interrupts_enabled call below. */
	mov x20, x0  //continuation
	mov x21, x1  //continuation parameter
	mov x22, x2  //wait result

	cbz x3, 1f									// arg3 nonzero => re-enable interrupts first
	mov x0, #1
	bl EXT(ml_set_interrupts_enabled)
1:

	mov		x0, x21								// Set the first parameter
	mov		x1, x22								// Set the wait result arg
#ifdef HAS_APPLE_PAC
	mov		x21, THREAD_CONTINUE_T_DISC			// Discriminator for signed thread_continue_t pointers
	blraa	x20, x21							// Branch to the continuation
#else
	blr		x20									// Branch to the continuation
#endif
	mrs		x0, TPIDR_EL1						// Get the current thread pointer
	b		EXT(thread_terminate)				// Kill the thread (continuations do not return)
320
321
322/*
323 *	thread_t Switch_context(thread_t	old,
324 * 				void		(*cont)(void),
325 *				thread_t	new)
326 */
	.text
	.align 5
	.globl	EXT(Switch_context)

LEXT(Switch_context)
	ARM64_PROLOG
	cbnz	x1, Lswitch_threads					// Skip saving old state if blocking on continuation
	LOAD_KERN_STACK_TOP	dst=x3, src=x0, tmp=x4	// Get the old kernel stack top
	save_general_registers	x3, 4				// Save old thread's (x0) callee-saved state
Lswitch_threads:
	set_thread_registers	x2, x3, x4			// Install x2 as the current thread
	LOAD_KERN_STACK_TOP	dst=x3, src=x2, tmp=x4
	load_general_registers	x3, 4				// Restore new thread's state, incl. SP/FP/LR
	set_process_dependent_keys_and_sync_context	x2, x3, x4, x5, w6
	cswitch_epilogue							// Return on the new thread's stack; x0 (old thread) is the result
342
343/*
344 *	thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
345 *
346 */
	.text
	.align 5
	.globl	EXT(Shutdown_context)

LEXT(Shutdown_context)
	ARM64_PROLOG
	mrs		x10, TPIDR_EL1							// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x11, src=x10, tmp=x12	// Get the top of the kernel stack
	save_general_registers	x11, 12					// Save state so this thread can be resumed later
	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)	// Disable interrupts
	LOAD_INT_STACK_THREAD dst=x12, src=x10, tmp=x11	// Shutdown runs on the interrupt stack,
	mov		sp, x12									// not the thread's kernel stack
	b		EXT(cpu_doshutdown)						// Tail-call; x0/x1 (doshutdown, processor) pass through
360
361/*
362 *	thread_t Idle_context(void)
363 *
364 */
	.text
	.align 5
	.globl	EXT(Idle_context)

LEXT(Idle_context)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1						// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2	// Get the top of the kernel stack
	save_general_registers	x1, 2				// Save state so Idle_load_context can resume this thread
	LOAD_INT_STACK_THREAD	dst=x2, src=x0, tmp=x1	// Idle runs on the interrupt stack
	mov		sp, x2
	b		EXT(cpu_idle)
377
/*
 *	thread_t Idle_load_context(void)
 *
 */
	.text
	.align 5
	.globl	EXT(Idle_load_context)

LEXT(Idle_load_context)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1						// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2	// Get the top of the kernel stack
	load_general_registers	x1, 2				// Restore the state saved by Idle_context
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	cswitch_epilogue							// Resume at the saved LR
393
	.align	2
	.globl	EXT(machine_set_current_thread)
/*
 * void machine_set_current_thread(thread_t thread)
 *
 * Installs x0 as the current thread in the thread-pointer / per-CPU
 * registers (via set_thread_registers) without switching register context.
 */
LEXT(machine_set_current_thread)
	ARM64_PROLOG
	set_thread_registers x0, x1, x2
	ret
400
401
402/* vim: set ts=4: */
403