/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include "assym.s"

/*
 * save_general_registers
 *
 * Saves the AAPCS64 callee-saved state of the current (kernel) context into
 * the kernel PCB, so that a later load_general_registers on the same PCB
 * resumes execution after the macro's expansion site.
 *
 * arg0 - thread_kernel_state pointer
 * arg1 - Scratch register NUMBER (expanded as x$1/w$1, not a register name)
 *
 * Clobbers: x$1 only; all saved registers are left intact.
 */

.macro save_general_registers
/* AAPCS-64 Page 14
 *
 * A subroutine invocation must preserve the contents of the registers r19-r29
 * and SP.
 */
#if __has_feature(ptrauth_calls)
	/* Sign LR against the current SP so cswitch_epilogue can authenticate
	 * it (retaa) when this context is eventually resumed. */
	paciasp
#endif
	stp		x19, x20, [$0, SS64_KERNEL_X19]
	stp		x21, x22, [$0, SS64_KERNEL_X21]
	stp		x23, x24, [$0, SS64_KERNEL_X23]
	stp		x25, x26, [$0, SS64_KERNEL_X25]
	stp		x27, x28, [$0, SS64_KERNEL_X27]
	stp		fp, lr, [$0, SS64_KERNEL_FP]
	/* Record that this saved context is a kernel-mode one (flag cleared).
	 * NOTE(review): exact consumer of SS64_KERNEL_PC_WAS_IN_USER is outside
	 * this file — confirm against the exception-return path. */
	strb	wzr, [$0, SS64_KERNEL_PC_WAS_IN_USER]
	mov		x$1, sp					// SP cannot be stored directly; stage through scratch
	str		x$1, [$0, SS64_KERNEL_SP]
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* On virtual platforms FEAT_SSBS may be absent; gARM_FEAT_SSBS is the
	 * runtime-probed flag. Skip the SSBS save entirely when unsupported. */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif
	/* Extract the single SSBS bit from PSTATE into a byte in the PCB. */
	mrs		x$1, SSBS
	lsr		x$1, x$1, #0 + PSR64_SSBS_SHIFT_64
	strb	w$1, [$0, SS64_KERNEL_SSBS]
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_4__
	/* Save the Data Independent Timing bit (ARMv8.4 FEAT_DIT). */
	mrs		x$1, DIT
	lsr		x$1, x$1, #0 + PSR64_DIT_SHIFT
	strb	w$1, [$0, SS64_KERNEL_DIT]
#endif //__ARM_ARCH_8_4__
#if __ARM_ARCH_8_2__
	/* Save the User Access Override bit (ARMv8.2 FEAT_UAO). */
	mrs		x$1, UAO
	lsr		x$1, x$1, #0 + PSR64_UAO_SHIFT
	strb	w$1, [$0, SS64_KERNEL_UAO]
#endif //__ARM_ARCH_8_2__

/* AAPCS-64 Page 14
 *
 * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
 * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
 * (or should be preserved by the caller).
 */
	str		d8,  [$0, NS64_KERNEL_D8]
	str		d9,  [$0, NS64_KERNEL_D9]
	str		d10, [$0, NS64_KERNEL_D10]
	str		d11, [$0, NS64_KERNEL_D11]
	str		d12, [$0, NS64_KERNEL_D12]
	str		d13, [$0, NS64_KERNEL_D13]
	str		d14, [$0, NS64_KERNEL_D14]
	str		d15, [$0, NS64_KERNEL_D15]

	/* FPCR is 32 bits of architectural state; saved so per-thread FP
	 * rounding/trap configuration survives the switch. */
	mrs		x$1, FPCR
	str		w$1, [$0, NS64_KERNEL_FPCR]
.endmacro

/*
 * load_general_registers
 *
 * Loads variable registers from kernel PCB.
 * Restores the state written by save_general_registers, including SP, FPCR,
 * and the optional SSBS/UAO/DIT PSTATE bits.
 *
 * arg0 - thread_kernel_state pointer
 * arg1 - Scratch register NUMBER (expanded as x$1/w$1, not a register name)
 */
.macro load_general_registers
	/* Restore FPCR first, while x19 is still free to serve as a second
	 * scratch: CMSR compares current vs. new and only performs the msr
	 * when they differ, branching to local label 1 otherwise. x19 is
	 * clobbered here but reloaded from the PCB just below. */
	ldr		w$1, [$0, NS64_KERNEL_FPCR]
	mrs		x19, FPCR
	CMSR	FPCR, x19, x$1, 1
1:

	ldp		x19, x20, [$0, SS64_KERNEL_X19]
	ldp		x21, x22, [$0, SS64_KERNEL_X21]
	ldp		x23, x24, [$0, SS64_KERNEL_X23]
	ldp		x25, x26, [$0, SS64_KERNEL_X25]
	ldp		x27, x28, [$0, SS64_KERNEL_X27]
	ldp		fp, lr, [$0, SS64_KERNEL_FP]
	ldr		x$1, [$0, SS64_KERNEL_SP]
	mov		sp, x$1
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* Mirror of the save path: skip the SSBS restore when the runtime
	 * feature probe (gARM_FEAT_SSBS) reports the CPU lacks FEAT_SSBS. */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif // APPLEVIRTUALPLATFORM
	/* Re-materialize the saved SSBS byte into its PSTATE bit position. */
	ldrb	w$1, [$0, SS64_KERNEL_SSBS]
	lsl		x$1, x$1, #0 + PSR64_SSBS_SHIFT_64
	msr		SSBS, x$1
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_2__
	/* Restore the User Access Override bit (ARMv8.2 FEAT_UAO). */
	ldrb	w$1, [$0, SS64_KERNEL_UAO]
	lsl		x$1, x$1, #0 + PSR64_UAO_SHIFT
	msr		UAO, x$1
#endif //__ARM_ARCH_8_2__
#if __ARM_ARCH_8_4__
	/* Restore the Data Independent Timing bit (ARMv8.4 FEAT_DIT). */
	ldrb	w$1, [$0, SS64_KERNEL_DIT]
	lsl		x$1, x$1, #0 + PSR64_DIT_SHIFT
	msr		DIT, x$1
#endif //__ARM_ARCH_8_4__

	ldr		d8,  [$0, NS64_KERNEL_D8]
	ldr		d9,  [$0, NS64_KERNEL_D9]
	ldr		d10, [$0, NS64_KERNEL_D10]
	ldr		d11, [$0, NS64_KERNEL_D11]
	ldr		d12, [$0, NS64_KERNEL_D12]
	ldr		d13, [$0, NS64_KERNEL_D13]
	ldr		d14, [$0, NS64_KERNEL_D14]
	ldr		d15, [$0, NS64_KERNEL_D15]
.endmacro

/*
 * cswitch_epilogue
 *
 * Returns to the address reloaded into LR, authenticating if needed.
 * With pointer authentication enabled, LR was signed by paciasp in
 * save_general_registers; retaa authenticates it against SP before returning.
 */
.macro cswitch_epilogue
#if __has_feature(ptrauth_calls)
	retaa
#else
	ret
#endif
.endm


/*
 * set_thread_registers
 *
 * Updates thread registers during context switch
 * arg0 - New thread pointer
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro set_thread_registers
	msr		TPIDR_EL1, $0				// Write new thread pointer to TPIDR_EL1
	ldr		$1, [$0, ACT_CPUDATAP]		// Point per-CPU data at the new active thread
	str		$0, [$1, CPU_ACTIVE_THREAD]

	ldr		$2, [$1, CPU_TPIDR_EL0]		// Write encoded CPU info to TPIDR_EL0
	msr		TPIDR_EL0, $2

	ldr		$1, [$0, TH_CTH_SELF]		// Get cthread pointer
	msr		TPIDRRO_EL0, $1				// Userspace-visible, read-only thread pointer

	ldr		$1, [$0, TH_THREAD_ID]		// Save the bottom 32-bits of the thread ID into
	msr		CONTEXTIDR_EL1, $1			// CONTEXTIDR_EL1 (top 32-bits are RES0).
.endmacro

/* Key reprogramming on cswitch is only done when PAC is paravirtualized. */
#define CSWITCH_ROP_KEYS (HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
#define CSWITCH_JOP_KEYS (HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)

/*
 * set_process_dependent_keys_and_sync_context
 *
 * Updates process dependent keys and issues explicit context sync during context switch if necessary
 * Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
 * and in cpu_data_init for slave processors
 *
 * thread   - New thread pointer
 * new_key  - Scratch register: New Thread Key
 * tmp_key  - Scratch register: Current CPU Key
 * cpudatap - Scratch register: Current CPU Data pointer
 * wsync    - Half-width scratch register: CPU sync required flag
 *
 * to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
 * we just use wsync to keep track of needing an ISB
 */
.macro set_process_dependent_keys_and_sync_context	thread, new_key, tmp_key, cpudatap, wsync


#if defined(HAS_APPLE_PAC)
	ldr		\cpudatap, [\thread, ACT_CPUDATAP]
#endif /* defined(HAS_APPLE_PAC) */

	mov		\wsync, #0

#if CSWITCH_ROP_KEYS
	/* REPROGRAM_ROP_KEYS branches to the skip label when the thread's key
	 * already matches the CPU's current key (no reprogram, no sync needed). */
	ldr		\new_key, [\thread, TH_ROP_PID]
	REPROGRAM_ROP_KEYS	Lskip_rop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	/* xnu hypervisor guarantees context synchronization during guest re-entry */
	mov		\wsync, #0
#else
	mov		\wsync, #1
#endif
Lskip_rop_keys_\@:
#endif /* CSWITCH_ROP_KEYS */

#if CSWITCH_JOP_KEYS
	ldr		\new_key, [\thread, TH_JOP_PID]
	REPROGRAM_JOP_KEYS	Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	/* As above: the hypervisor synchronizes context on guest re-entry. */
	mov		\wsync, #0
#else
	mov		\wsync, #1
#endif
Lskip_jop_keys_\@:
#endif /* CSWITCH_JOP_KEYS */

	cbnz	\wsync, Lsync_now_\@
	b		1f

Lsync_now_\@:
	isb		sy

	/* NOTE: two `1:` labels follow deliberately. Without paravirtualized
	 * PAC, `b 1f` above lands past the strb, skipping the flag clear; with
	 * it, `b 1f` lands before the strb so the flag is always cleared. */
#if HAS_PARAVIRTUALIZED_PAC
1:	/* guests need to clear the sync flag even after skipping the isb, in case they synced via hvc instead */
#endif
	strb	wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]
1:
.endmacro

/*
 * void machine_load_context(thread_t thread)
 *
 * Load the context for the first thread to run on a
 * cpu, and go.
 */
	.text
	.align 2
	.globl	EXT(machine_load_context)

LEXT(machine_load_context)
	ARM64_PROLOG
	set_thread_registers	x0, x1, x2
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2	// Get top of kernel stack
	load_general_registers	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	mov		x0, #0							// Clear argument to thread_continue
	cswitch_epilogue

/*
 * typedef void (*thread_continue_t)(void *param, wait_result_t)
 *
 * void Call_continuation( thread_continue_t continuation,
 *                         void *param,
 *                         wait_result_t wresult,
 *                         bool enable interrupts)
 */
	.text
	.align	5
	.globl	EXT(Call_continuation)

LEXT(Call_continuation)
	ARM64_PROLOG
	mrs		x4, TPIDR_EL1					// Get the current thread pointer

	/* ARM64_TODO arm loads the kstack top instead of arg4. What should we use?
	 */
	/* Reset to the top of the kernel stack: a continuation never returns,
	 * so the previous kernel stack contents are dead. */
	LOAD_KERN_STACK_TOP	dst=x5, src=x4, tmp=x6
	mov		sp, x5							// Set stack pointer
	mov		fp, #0							// Clear the frame pointer

	set_process_dependent_keys_and_sync_context	x4, x5, x6, x7, w20

	/* Stash the arguments in callee-saved registers so they survive the
	 * ml_set_interrupts_enabled call below. */
	mov		x20, x0							// continuation
	mov		x21, x1							// continuation parameter
	mov		x22, x2							// wait result

	cbz		x3, 1f							// Skip if interrupts should stay disabled
	mov		x0, #1
	bl		EXT(ml_set_interrupts_enabled)
1:

	mov		x0, x21							// Set the first parameter
	mov		x1, x22							// Set the wait result arg
#ifdef HAS_APPLE_PAC
	/* Continuation pointers are signed with a fixed discriminator;
	 * authenticate-and-branch rather than a raw blr. */
	mov		x21, THREAD_CONTINUE_T_DISC
	blraa	x20, x21						// Branch to the continuation
#else
	blr		x20								// Branch to the continuation
#endif
	/* Continuations that return are done with the thread. */
	mrs		x0, TPIDR_EL1					// Get the current thread pointer
	b		EXT(thread_terminate)			// Kill the thread


/*
 * thread_t Switch_context(thread_t old,
 *                         void (*cont)(void),
 *                         thread_t new)
 */
	.text
	.align	5
	.globl	EXT(Switch_context)

LEXT(Switch_context)
	ARM64_PROLOG
	cbnz	x1, Lswitch_threads				// Skip saving old state if blocking on continuation
	LOAD_KERN_STACK_TOP	dst=x3, src=x0, tmp=x4	// Get the old kernel stack top
	save_general_registers	x3, 4
Lswitch_threads:
	set_thread_registers	x2, x3, x4
	LOAD_KERN_STACK_TOP	dst=x3, src=x2, tmp=x4
	load_general_registers	x3, 4
	set_process_dependent_keys_and_sync_context	x2, x3, x4, x5, w6
	cswitch_epilogue

/*
 * thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
 *
 * Saves the current thread's state, then runs doshutdown on the interrupt stack.
 */
	.text
	.align	5
	.globl	EXT(Shutdown_context)

LEXT(Shutdown_context)
	ARM64_PROLOG
	mrs		x10, TPIDR_EL1					// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x11, src=x10, tmp=x12	// Get the top of the kernel stack
	save_general_registers	x11, 12
	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)	// Disable interrupts
	LOAD_INT_STACK_THREAD	dst=x12, src=x10, tmp=x11	// Switch to the interrupt stack
	mov		sp, x12
	b		EXT(cpu_doshutdown)

/*
 * thread_t Idle_context(void)
 *
 * Saves the current thread's state, then idles on the interrupt stack.
 */
	.text
	.align	5
	.globl	EXT(Idle_context)

LEXT(Idle_context)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1					// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2	// Get the top of the kernel stack
	save_general_registers	x1, 2
	LOAD_INT_STACK_THREAD	dst=x2, src=x0, tmp=x1	// Idle runs on the interrupt stack
	mov		sp, x2
	b		EXT(cpu_idle)

/*
 * thread_t Idle_load_context(void)
 *
 * Resumes the thread state saved by Idle_context. (Original comment said
 * Idle_context — copy/paste error; this is the counterpart load path.)
 */
	.text
	.align	5
	.globl	EXT(Idle_load_context)

LEXT(Idle_load_context)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1					// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2	// Get the top of the kernel stack
	load_general_registers	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	cswitch_epilogue

/*
 * void machine_set_current_thread(thread_t thread)
 *
 * Installs `thread` as the current thread on this CPU without switching
 * any saved register context.
 */
	.align	2
	.globl	EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
	ARM64_PROLOG
	set_thread_registers	x0, x1, x2
	ret


/* vim: set ts=4: */