/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/pac_asm.h>
#include <arm64/proc_reg.h>
#include "assym.s"

/*
 * save_general_registers
 *
 * Saves variable registers to kernel PCB.
 * arg0 - thread_kernel_state pointer
 * arg1 - Scratch register (numeric suffix only, e.g. "4" for x4/w4)
 */

.macro save_general_registers
/* AAPCS-64 Page 14
 *
 * A subroutine invocation must preserve the contents of the registers r19-r29
 * and SP.
 */
#if __has_feature(ptrauth_calls)
	paciasp								// Sign LR (IA key, SP as diversifier) before spilling it below
#endif
	stp		x19, x20, [$0, SS64_KERNEL_X19]
	stp		x21, x22, [$0, SS64_KERNEL_X21]
	stp		x23, x24, [$0, SS64_KERNEL_X23]
	stp		x25, x26, [$0, SS64_KERNEL_X25]
	stp		x27, x28, [$0, SS64_KERNEL_X27]
	stp		fp, lr, [$0, SS64_KERNEL_FP]
	strb	wzr, [$0, SS64_KERNEL_PC_WAS_IN_USER]	// saved context is a kernel context, not a user one
	mov		x$1, sp
	str		x$1, [$0, SS64_KERNEL_SP]
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* SSBS support is discovered at runtime on virtual platforms; skip save if absent */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif
	mrs		x$1, SSBS
	lsr		x$1, x$1, #0 + PSR64_SSBS_SHIFT_64	// shift the PSTATE.SSBS bit down to bit 0
	strb	w$1, [$0, SS64_KERNEL_SSBS]
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_4__
	mrs		x$1, DIT
	lsr		x$1, x$1, #0 + PSR64_DIT_SHIFT		// shift the PSTATE.DIT bit down to bit 0
	strb	w$1, [$0, SS64_KERNEL_DIT]
#endif //__ARM_ARCH_8_4__
#if __ARM_ARCH_8_2__
	mrs		x$1, UAO
	lsr		x$1, x$1, #0 + PSR64_UAO_SHIFT		// shift the PSTATE.UAO bit down to bit 0
	strb	w$1, [$0, SS64_KERNEL_UAO]
#endif //__ARM_ARCH_8_2__

/* AAPCS-64 Page 14
 *
 * Registers d8-d15 (s8-s15) must be preserved by a callee across subroutine
 * calls; the remaining registers (v0-v7, v16-v31) do not need to be preserved
 * (or should be preserved by the caller).
 */
	str		d8, [$0, NS64_KERNEL_D8]
	str		d9, [$0, NS64_KERNEL_D9]
	str		d10,[$0, NS64_KERNEL_D10]
	str		d11,[$0, NS64_KERNEL_D11]
	str		d12,[$0, NS64_KERNEL_D12]
	str		d13,[$0, NS64_KERNEL_D13]
	str		d14,[$0, NS64_KERNEL_D14]
	str		d15,[$0, NS64_KERNEL_D15]

	mrs		x$1, FPCR							// FP control state is per-thread; save it too
	str		w$1, [$0, NS64_KERNEL_FPCR]
.endmacro

/*
 * load_general_registers
 *
 * Loads variable registers from kernel PCB.
 * arg0 - thread_kernel_state pointer
 * arg1 - Scratch register (numeric suffix only, e.g. "4" for x4/w4)
 */
.macro load_general_registers
	ldr		w$1, [$0, NS64_KERNEL_FPCR]
	mrs		x19, FPCR							// x19 is safe as scratch: it is reloaded just below
	CMSR FPCR, x19, x$1, 1						// write FPCR only if it changed (skips to 1: otherwise)
1:

	ldp		x19, x20, [$0, SS64_KERNEL_X19]
	ldp		x21, x22, [$0, SS64_KERNEL_X21]
	ldp		x23, x24, [$0, SS64_KERNEL_X23]
	ldp		x25, x26, [$0, SS64_KERNEL_X25]
	ldp		x27, x28, [$0, SS64_KERNEL_X27]
	ldp		fp, lr, [$0, SS64_KERNEL_FP]
	ldr		x$1, [$0, SS64_KERNEL_SP]
	mov		sp, x$1
#if HAS_ARM_FEAT_SSBS2
#if APPLEVIRTUALPLATFORM
	/* SSBS support is discovered at runtime on virtual platforms; skip restore if absent */
	adrp	x$1, EXT(gARM_FEAT_SSBS)@page
	ldrh	w$1, [x$1, EXT(gARM_FEAT_SSBS)@pageoff]
	cbz		x$1, 1f
#endif // APPLEVIRTUALPLATFORM
	ldrb	w$1, [$0, SS64_KERNEL_SSBS]
	lsl		x$1, x$1, #0 + PSR64_SSBS_SHIFT_64	// shift the saved bit back into PSTATE.SSBS position
	msr		SSBS, x$1
1:
#endif // HAS_ARM_FEAT_SSBS2
#if __ARM_ARCH_8_4__
	ldrb	w$1, [$0, SS64_KERNEL_DIT]
	lsl		x$1, x$1, #0 + PSR64_DIT_SHIFT		// shift the saved bit back into PSTATE.DIT position
	msr		DIT, x$1
#endif //__ARM_ARCH_8_4__
#if __ARM_ARCH_8_2__
	ldrb	w$1, [$0, SS64_KERNEL_UAO]
	lsl		x$1, x$1, #0 + PSR64_UAO_SHIFT		// shift the saved bit back into PSTATE.UAO position
	msr		UAO, x$1
#endif //__ARM_ARCH_8_2__

	ldr		d8, [$0, NS64_KERNEL_D8]
	ldr		d9, [$0, NS64_KERNEL_D9]
	ldr		d10,[$0, NS64_KERNEL_D10]
	ldr		d11,[$0, NS64_KERNEL_D11]
	ldr		d12,[$0, NS64_KERNEL_D12]
	ldr		d13,[$0, NS64_KERNEL_D13]
	ldr		d14,[$0, NS64_KERNEL_D14]
	ldr		d15,[$0, NS64_KERNEL_D15]
.endmacro

/*
 * cswitch_epilogue
 *
 * Returns to the address reloaded into LR, authenticating if needed.
 */
.macro cswitch_epilogue
#if __has_feature(ptrauth_calls)
	retaa								// authenticate LR (signed by save_general_registers' paciasp) and return
#else
	ret
#endif
.endm


/*
 * set_thread_registers
 *
 * Updates thread registers during context switch
 * arg0 - New thread pointer
 * arg1 - Scratch register
 * arg2 - Scratch register
 */
.macro set_thread_registers
	msr		TPIDR_EL1, $0						// Write new thread pointer to TPIDR_EL1
	ldr		$1, [$0, ACT_CPUDATAP]				// Get this thread's cpu data
	str		$0, [$1, CPU_ACTIVE_THREAD]			// Publish it as the CPU's active thread

	ldrsh	$2, [$1, CPU_NUMBER_GS]				// Expose the cpu number to EL0 via
	msr		TPIDR_EL0, $2						// TPIDR_EL0

	ldr		$1, [$0, TH_CTH_SELF]				// Get cthread pointer
	msr		TPIDRRO_EL0, $1						// Expose it read-only to EL0

#if DEBUG || DEVELOPMENT
	ldr		$1, [$0, TH_THREAD_ID]				// Save the bottom 32-bits of the thread ID into
	msr		CONTEXTIDR_EL1, $1					// CONTEXTIDR_EL1 (top 32-bits are RES0).
#endif /* DEBUG || DEVELOPMENT */
.endmacro

#define CSWITCH_ROP_KEYS (HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)
#define CSWITCH_JOP_KEYS (HAS_APPLE_PAC && HAS_PARAVIRTUALIZED_PAC)

/*
 * set_process_dependent_keys_and_sync_context
 *
 * Updates process dependent keys and issues explicit context sync during context switch if necessary
 * Per CPU Data rop_key is initialized in arm_init() for bootstrap processor
 * and in cpu_data_init for slave processors
 *
 * thread   - New thread pointer
 * new_key  - Scratch register: New Thread Key
 * tmp_key  - Scratch register: Current CPU Key
 * cpudatap - Scratch register: Current CPU Data pointer
 * wsync    - Half-width scratch register: CPU sync required flag
 *
 * to save on ISBs, for ARMv8.5 we use the CPU_SYNC_ON_CSWITCH field, cached in wsync, for pre-ARMv8.5,
 * we just use wsync to keep track of needing an ISB
 */
.macro set_process_dependent_keys_and_sync_context thread, new_key, tmp_key, cpudatap, wsync


#if defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC)
	ldr		\cpudatap, [\thread, ACT_CPUDATAP]
#endif /* defined(__ARM_ARCH_8_5__) || defined(HAS_APPLE_PAC) */

#if defined(__ARM_ARCH_8_5__)
	/* ARMv8.5: a deferred-sync request may already be pending for this CPU */
	ldrb	\wsync, [\cpudatap, CPU_SYNC_ON_CSWITCH]
#else /* defined(__ARM_ARCH_8_5__) */
	mov		\wsync, #0
#endif

#if CSWITCH_ROP_KEYS
	ldr		\new_key, [\thread, TH_ROP_PID]
	/* Skips ahead to Lskip_rop_keys when the key is already current */
	REPROGRAM_ROP_KEYS	Lskip_rop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	/* xnu hypervisor guarantees context synchronization during guest re-entry */
	mov		\wsync, #0
#else
	mov		\wsync, #1							// key changed: an ISB is required before use
#endif
Lskip_rop_keys_\@:
#endif /* CSWITCH_ROP_KEYS */

#if CSWITCH_JOP_KEYS
	ldr		\new_key, [\thread, TH_JOP_PID]
	/* Skips ahead to Lskip_jop_keys when the key is already current */
	REPROGRAM_JOP_KEYS	Lskip_jop_keys_\@, \new_key, \cpudatap, \tmp_key
#if HAS_PARAVIRTUALIZED_PAC
	mov		\wsync, #0
#else
	mov		\wsync, #1							// key changed: an ISB is required before use
#endif
Lskip_jop_keys_\@:
#endif /* CSWITCH_JOP_KEYS */

	cbz		\wsync, 1f							// no sync pending/required: skip the ISB
	isb		sy

	/* NOTE: the duplicate "1:" labels below are deliberate; "1f" binds to the nearest following one */
#if HAS_PARAVIRTUALIZED_PAC
1:	/* guests need to clear the sync flag even after skipping the isb, in case they synced via hvc instead */
#endif
#if defined(__ARM_ARCH_8_5__)
	strb	wzr, [\cpudatap, CPU_SYNC_ON_CSWITCH]	// acknowledge the deferred-sync request
#endif
1:
.endmacro

/*
 * void machine_load_context(thread_t thread)
 *
 * Load the context for the first thread to run on a
 * cpu, and go.
 */
	.text
	.align	2
	.globl	EXT(machine_load_context)

LEXT(machine_load_context)
	set_thread_registers	x0, x1, x2
	ldr		x1, [x0, TH_KSTACKPTR]				// Get top of kernel stack
	load_general_registers	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	mov		x0, #0								// Clear argument to thread_continue
	cswitch_epilogue

/*
 * typedef void (*thread_continue_t)(void *param, wait_result_t)
 *
 * void Call_continuation( thread_continue_t continuation,
 *				void *param,
 *				wait_result_t wresult,
 *				bool enable_interrupts)
 */
	.text
	.align	5
	.globl	EXT(Call_continuation)

LEXT(Call_continuation)
	mrs		x4, TPIDR_EL1						// Get the current thread pointer

	/* ARM64_TODO arm loads the kstack top instead of arg4. What should we use? */
	ldr		x5, [x4, TH_KSTACKPTR]				// Get the top of the kernel stack
	mov		sp, x5								// Set stack pointer
	mov		fp, #0								// Clear the frame pointer

	set_process_dependent_keys_and_sync_context	x4, x5, x6, x7, w20

	/* Stash args in callee-saved registers across the ml_set_interrupts_enabled call */
	mov		x20, x0								// continuation
	mov		x21, x1								// continuation parameter
	mov		x22, x2								// wait result

	cbz		x3, 1f								// skip if interrupts should stay disabled
	mov		x0, #1
	bl		EXT(ml_set_interrupts_enabled)
1:

	mov		x0, x21								// Set the first parameter
	mov		x1, x22								// Set the wait result arg
#ifdef HAS_APPLE_PAC
	mov		x21, THREAD_CONTINUE_T_DISC			// x21 no longer needed; reuse it for the PAC discriminator
	blraa	x20, x21							// Branch to the continuation, authenticating the pointer
#else
	blr		x20									// Branch to the continuation
#endif
	/* Continuations do not return through here normally; if one does, terminate the thread */
	mrs		x0, TPIDR_EL1						// Get the current thread pointer
	b		EXT(thread_terminate)				// Kill the thread


/*
 * thread_t Switch_context(thread_t old,
 *				void (*cont)(void),
 *				thread_t new)
 */
	.text
	.align	5
	.globl	EXT(Switch_context)

LEXT(Switch_context)
	cbnz	x1, Lswitch_threads					// Skip saving old state if blocking on continuation
	ldr		x3, [x0, TH_KSTACKPTR]				// Get the old kernel stack top
	save_general_registers	x3, 4
Lswitch_threads:
	set_thread_registers	x2, x3, x4
	ldr		x3, [x2, TH_KSTACKPTR]				// Get the new kernel stack top
	load_general_registers	x3, 4
	set_process_dependent_keys_and_sync_context	x2, x3, x4, x5, w6
	cswitch_epilogue

/*
 * thread_t Shutdown_context(void (*doshutdown)(processor_t), processor_t processor)
 *
 */
	.text
	.align	5
	.globl	EXT(Shutdown_context)

LEXT(Shutdown_context)
	mrs		x10, TPIDR_EL1						// Get thread pointer
	ldr		x11, [x10, TH_KSTACKPTR]			// Get the top of the kernel stack
	save_general_registers	x11, 12
	msr		DAIFSet, #(DAIFSC_FIQF | DAIFSC_IRQF)	// Disable interrupts
	ldr		x11, [x10, ACT_CPUDATAP]			// Get current cpu
	ldr		x12, [x11, CPU_ISTACKPTR]			// Switch to interrupt stack
	mov		sp, x12
	b		EXT(cpu_doshutdown)					// Does not return

/*
 * thread_t Idle_context(void)
 *
 */
	.text
	.align	5
	.globl	EXT(Idle_context)

LEXT(Idle_context)
	mrs		x0, TPIDR_EL1						// Get thread pointer
	ldr		x1, [x0, TH_KSTACKPTR]				// Get the top of the kernel stack
	save_general_registers	x1, 2
	ldr		x1, [x0, ACT_CPUDATAP]				// Get current cpu
	ldr		x2, [x1, CPU_ISTACKPTR]				// Switch to interrupt stack
	mov		sp, x2
	b		EXT(cpu_idle)

/*
 * thread_t Idle_load_context(void)
 *
 */
	.text
	.align	5
	.globl	EXT(Idle_load_context)

LEXT(Idle_load_context)
	mrs		x0, TPIDR_EL1						// Get thread pointer
	ldr		x1, [x0, TH_KSTACKPTR]				// Get the top of the kernel stack
	load_general_registers	x1, 2
	set_process_dependent_keys_and_sync_context	x0, x1, x2, x3, w4
	cswitch_epilogue

/*
 * void machine_set_current_thread(thread_t thread)
 *
 * Installs the given thread as the current thread without a full context switch.
 */
	.align	2
	.globl	EXT(machine_set_current_thread)
LEXT(machine_set_current_thread)
	set_thread_registers	x0, x1, x2
	ret


/* vim: set ts=4: */