/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include "assym.s"

/*
 * save_general_registers
 *
 * Saves variable registers to kernel PCB.
 * arg0 - thread_kernel_state pointer
 * arg1 - Scratch register
 */

.macro save_general_registers
/* AAPCS-64 Page 14
 *
 * A subroutine invocation must preserve the contents of the registers r19-r29
 * and SP.
 */
#if __has_feature(ptrauth_calls)
	/* Sign LR against SP before it is spilled to memory, so the matching
	 * retaa in cswitch_epilogue can authenticate it on the resume path. */
	paciasp
#endif
	stp		x19, x20, [$0, SS64_KERNEL_X19]
	stp		x21, x22, [$0, SS64_KERNEL_X21]
	stp		x23, x24, [$0, SS64_KERNEL_X23]
	stp		x25, x26, [$0, SS64_KERNEL_X25]
	stp		x27, x28, [$0, SS64_KERNEL_X27]
	stp		fp, lr, [$0, SS64_KERNEL_FP]
	/* This is kernel state: clear the saved-PC-was-in-user flag. */
	strb	wzr, [$0, SS64_KERNEL_PC_WAS_IN_USER]
	mov		x$1, sp
	str		x$1, [$0, SS64_KERNEL_SP]
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* On a virtual platform, only touch PSTATE.SSBS if the feature was
	 * actually detected at runtime (gARM_FEAT_SSBS != 0). */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif
	/* Save PSTATE.SSBS (speculative store bypass safe) as a single byte. */
	mrs		x$1, SSBS
	lsr		x$1, x$1, #0 + PSR64_SSBS_SHIFT_64
	strb	w$1, [$0, SS64_KERNEL_SSBS]
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_4__
	/* Save PSTATE.DIT (data-independent timing). */
	mrs		x$1, DIT
	lsr		x$1, x$1, #0 + PSR64_DIT_SHIFT
	strb	w$1, [$0, SS64_KERNEL_DIT]
#endif //__ARM_ARCH_8_4__
#if __ARM_ARCH_8_2__
	/* Save PSTATE.UAO (user access override). */
	mrs		x$1, UAO
	lsr		x$1, x$1, #0 + PSR64_UAO_SHIFT
	strb	w$1, [$0, SS64_KERNEL_UAO]
#endif //__ARM_ARCH_8_2__

/* AAPCS-64 Page 14
 *
 * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
 * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
 * (or should be preserved by the caller).
 */
	str		d8,	[$0, NS64_KERNEL_D8]
	str		d9,	[$0, NS64_KERNEL_D9]
	str		d10,[$0, NS64_KERNEL_D10]
	str		d11,[$0, NS64_KERNEL_D11]
	str		d12,[$0, NS64_KERNEL_D12]
	str		d13,[$0, NS64_KERNEL_D13]
	str		d14,[$0, NS64_KERNEL_D14]
	str		d15,[$0, NS64_KERNEL_D15]

	/* FPCR is per-thread kernel state as well; only the low 32 bits are
	 * architecturally defined, hence the word-sized store. */
	mrs		x$1, FPCR
	str		w$1, [$0, NS64_KERNEL_FPCR]
.endmacro

/*
 * load_general_registers
 *
 * Loads variable registers from kernel PCB.
 * arg0 - thread_kernel_state pointer
 * arg1 - Scratch register
 */
.macro load_general_registers
	/* Restore FPCR first. x19 is free as scratch here because it is
	 * reloaded from the PCB immediately below. CMSR only performs the
	 * msr when the new value differs, branching to local label 1 (its
	 * last argument) when no write is needed. */
	ldr		w$1, [$0, NS64_KERNEL_FPCR]
	mrs		x19, FPCR
	CMSR FPCR, x19, x$1, 1
1:

	ldp		x19, x20, [$0, SS64_KERNEL_X19]
	ldp		x21, x22, [$0, SS64_KERNEL_X21]
	ldp		x23, x24, [$0, SS64_KERNEL_X23]
	ldp		x25, x26, [$0, SS64_KERNEL_X25]
	ldp		x27, x28, [$0, SS64_KERNEL_X27]
	ldp		fp, lr, [$0, SS64_KERNEL_FP]
	ldr		x$1, [$0, SS64_KERNEL_SP]
	mov		sp, x$1
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* Mirror of the runtime-feature check in save_general_registers:
	 * skip the SSBS restore when gARM_FEAT_SSBS == 0. */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif // APPLEVIRTUALPLATFORM
	/* Restore PSTATE.SSBS from its saved byte. */
	ldrb	w$1, [$0, SS64_KERNEL_SSBS]
	lsl		x$1, x$1, #0 + PSR64_SSBS_SHIFT_64
	msr		SSBS, x$1
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_2__
	/* Restore PSTATE.UAO. */
	ldrb	w$1, [$0, SS64_KERNEL_UAO]
	lsl		x$1, x$1, #0 + PSR64_UAO_SHIFT
	msr		UAO, x$1
#endif //__ARM_ARCH_8_2__
#if __ARM_ARCH_8_4__
	/* Restore PSTATE.DIT. */
	ldrb	w$1, [$0, SS64_KERNEL_DIT]
	lsl		x$1, x$1, #0 + PSR64_DIT_SHIFT
	msr		DIT, x$1
#endif //__ARM_ARCH_8_4__

	/* Restore the callee-saved low halves of v8-v15 (AAPCS64). */
	ldr		d8,	[$0, NS64_KERNEL_D8]
	ldr		d9,	[$0, NS64_KERNEL_D9]
	ldr		d10,[$0, NS64_KERNEL_D10]
	ldr		d11,[$0, NS64_KERNEL_D11]
	ldr		d12,[$0, NS64_KERNEL_D12]
	ldr		d13,[$0, NS64_KERNEL_D13]
	ldr		d14,[$0, NS64_KERNEL_D14]
	ldr		d15,[$0, NS64_KERNEL_D15]
.endmacro

/*
 * cswitch_epilogue
 *
 * Returns to the address reloaded into LR, authenticating if needed.
 */
.macro cswitch_epilogue
#if __has_feature(ptrauth_calls)
	/* LR was signed by paciasp in save_general_registers; authenticate
	 * against SP on the way out. */
	retaa
#else
	ret
#endif
.endm


/*
 * set_thread_registers
 *
 * Updates thread registers during context switch
 * arg0 - New thread pointer
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro set_thread_registers
	msr		TPIDR_EL1, $0						// Write new thread pointer to TPIDR_EL1
	ldr		$1, [$0, ACT_CPUDATAP]
	str		$0, [$1, CPU_ACTIVE_THREAD]

	ldr		$2, [$1, CPU_TPIDR_EL0]				// Write encoded CPU info to TPIDR_EL0
	msr		TPIDR_EL0, $2

	ldr		$1, [$0, TH_CTH_SELF]				// Get cthread pointer
	msr		TPIDRRO_EL0, $1

	ldr		$1, [$0, TH_THREAD_ID]				// Save the bottom 32-bits of the thread ID into
	msr		CONTEXTIDR_EL1, $1					// CONTEXTIDR_EL1 (top 32-bits are RES0).
.endmacro

#define CSWITCH_ROP_KEYS (HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
#define CSWITCH_JOP_KEYS (HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)

/*
 * set_process_dependent_keys_and_sync_context
 *
 * Updates process dependent keys and issues explicit context sync during context switch if necessary
 * Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
 * and in cpu_data_init for slave processors
 *
 * thread - New thread pointer
 * new_key - Scratch register: New Thread Key
 * tmp_key - Scratch register: Current CPU Key
 * cpudatap - Scratch register: Current CPU Data pointer
 * wsync - Half-width scratch register: CPU sync required flag
 *
 * to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
 * we just use wsync to keep track of needing an ISB
 */
.macro set_process_dependent_keys_and_sync_context	thread, new_key, tmp_key, cpudatap, wsync


#if defined(HAS_APPLE_PAC)
	ldr		\cpudatap, [\thread, ACT_CPUDATAP]
#endif /* defined(HAS_APPLE_PAC) */

	mov		\wsync, #0

#if CSWITCH_ROP_KEYS
	/* REPROGRAM_ROP_KEYS branches to the skip label when the new thread's
	 * key already matches the one programmed on this CPU. */
	ldr		\new_key, [\thread, TH_ROP_PID]
	REPROGRAM_ROP_KEYS	Lskip_rop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	/* xnu hypervisor guarantees context synchronization during guest re-entry */
	mov		\wsync, #0
#else
	mov		\wsync, #1
#endif
Lskip_rop_keys_\@:
#endif /* CSWITCH_ROP_KEYS */

#if CSWITCH_JOP_KEYS
	ldr		\new_key, [\thread, TH_JOP_PID]
	REPROGRAM_JOP_KEYS	Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	mov		\wsync, #0
#else
	mov		\wsync, #1
#endif
Lskip_jop_keys_\@:
#endif /* CSWITCH_JOP_KEYS */

	/* Only pay for an ISB if a key was actually reprogrammed (wsync != 0).
	 * NOTE: with HAS_PARAVIRTUALIZED_PAC the cbz lands on the first "1:"
	 * below, so the sync flag is still cleared; without it, the cbz lands
	 * on the final "1:" and skips both the isb and the strb. */
	cbz		\wsync, 1f
	isb		sy

#if HAS_PARAVIRTUALIZED_PAC
1:	/* guests need to clear the sync flag even after skipping the isb, in case they synced via hvc instead */
#endif
	strb	wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]
1:
.endmacro

/*
 * void	machine_load_context(thread_t thread)
 *
 * Load the context for the first thread to run on a
 * cpu, and go.
 */
	.text
	.align 2
	.globl	EXT(machine_load_context)

LEXT(machine_load_context)
	ARM64_PROLOG
	set_thread_registers	x0, x1, x2
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2	// Get top of kernel stack
	load_general_registers	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	mov		x0, #0								// Clear argument to thread_continue
	cswitch_epilogue

/*
 * typedef void (*thread_continue_t)(void *param, wait_result_t)
 *
 * void Call_continuation( thread_continue_t continuation,
 *					void *param,
 *					wait_result_t wresult,
 *					bool enable_interrupts)
 */
	.text
	.align	5
	.globl	EXT(Call_continuation)

LEXT(Call_continuation)
	ARM64_PROLOG
	mrs		x4, TPIDR_EL1						// Get the current thread pointer

	/* ARM64_TODO arm loads the kstack top instead of arg4. What should we use? */
	LOAD_KERN_STACK_TOP	dst=x5, src=x4, tmp=x6
	mov		sp, x5								// Set stack pointer
	mov		fp, #0								// Clear the frame pointer

	set_process_dependent_keys_and_sync_context	x4, x5, x6, x7, w20

	/* Stash args in callee-saved registers so they survive the call to
	 * ml_set_interrupts_enabled below. */
	mov x20, x0								//continuation
	mov x21, x1								//continuation parameter
	mov x22, x2								//wait result

	cbz x3, 1f								// Skip if interrupts should stay disabled
	mov x0, #1
	bl EXT(ml_set_interrupts_enabled)
1:

	mov		x0, x21								// Set the first parameter
	mov		x1, x22								// Set the wait result arg
#ifdef HAS_APPLE_PAC
	mov		x21, THREAD_CONTINUE_T_DISC			// Diversifier for thread_continue_t pointers
	blraa	x20, x21							// Branch to the continuation (authenticated)
#else
	blr		x20									// Branch to the continuation
#endif
	/* Continuations never return; if one does, terminate the thread. */
	mrs		x0, TPIDR_EL1						// Get the current thread pointer
	b		EXT(thread_terminate)				// Kill the thread


/*
 * thread_t Switch_context(thread_t	old,
 * 				void		(*cont)(void),
 *				thread_t	new)
 */
	.text
	.align 5
	.globl	EXT(Switch_context)

LEXT(Switch_context)
	ARM64_PROLOG
	cbnz	x1, Lswitch_threads					// Skip saving old state if blocking on continuation
	LOAD_KERN_STACK_TOP	dst=x3, src=x0, tmp=x4	// Get the old kernel stack top
	save_general_registers	x3, 4
Lswitch_threads:
	set_thread_registers	x2, x3, x4
	LOAD_KERN_STACK_TOP	dst=x3, src=x2, tmp=x4
	load_general_registers	x3, 4
	set_process_dependent_keys_and_sync_context	x2, x3, x4, x5, w6
	cswitch_epilogue

/*
 * thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
 *
 */
	.text
	.align 5
	.globl	EXT(Shutdown_context)

LEXT(Shutdown_context)
	ARM64_PROLOG
	mrs		x10, TPIDR_EL1							// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x11, src=x10, tmp=x12	// Get the top of the kernel stack
	save_general_registers	x11, 12
	msr		DAIFSet, #(DAIFSC_STANDARD_DISABLE)		// Disable interrupts
	/* Switch to the interrupt stack before shutting the CPU down; the
	 * kernel stack belongs to the (now saved) thread. */
	LOAD_INT_STACK_THREAD	dst=x12, src=x10, tmp=x11
	mov		sp, x12
	b		EXT(cpu_doshutdown)

/*
 * thread_t Idle_context(void)
 *
 */
	.text
	.align 5
	.globl	EXT(Idle_context)

LEXT(Idle_context)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1							// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2		// Get the top of the kernel stack
	save_general_registers	x1, 2
	/* Idle runs on the interrupt stack; the thread's kernel stack state
	 * was just saved and is restored by Idle_load_context. */
	LOAD_INT_STACK_THREAD	dst=x2, src=x0, tmp=x1
	mov		sp, x2
	b		EXT(cpu_idle)

/*
 * thread_t Idle_load_context(void)
 *
 */
	.text
	.align	5
	.globl	EXT(Idle_load_context)

LEXT(Idle_load_context)
	ARM64_PROLOG
	mrs		x0, TPIDR_EL1							// Get thread pointer
	LOAD_KERN_STACK_TOP	dst=x1, src=x0, tmp=x2		// Get the top of the kernel stack
	load_general_registers	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	cswitch_epilogue

	.align	2
	.globl	EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
	ARM64_PROLOG
	set_thread_registers	x0, x1, x2
	ret


/* vim: set ts=4: */