/*
 * Copyright (c) 2011-2013 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <machine/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/machine_routines_asm.h>
#include <arm64/proc_reg.h>
#include <pexpert/arm64/board_config.h>
#include <mach/exception_types.h>
#include <mach_kdp.h>
#include <config_dtrace.h>
#include "assym.s"
#include <arm64/exception_asm.h>
#include "dwarf_unwind.h"

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif

#if XNU_MONITOR
/*
 * CHECK_EXCEPTION_RETURN_DISPATCH_PPL
 *
 * Checks if an exception was taken from the PPL, and if so, trampolines back
 * into the PPL.
 *   x26 - 0 if the exception was taken while in the kernel, 1 if the
 *         exception was taken while in the PPL.
 */
.macro CHECK_EXCEPTION_RETURN_DISPATCH_PPL
    cmp     x26, xzr
    b.eq    1f

    /* Return to the PPL. */
    mov     x15, #0
    mov     w10, #PPL_STATE_EXCEPTION
#error "XPRR configuration error"
1:
.endmacro


#endif /* XNU_MONITOR */

/*
 * MAP_KERNEL
 *
 * Restores the kernel EL1 mappings, if necessary.
 *
 * This may mutate x18.
 */
.macro MAP_KERNEL
#if __ARM_KERNEL_PROTECT__
    /* Switch to the kernel ASID (low bit set) for the task. */
    mrs     x18, TTBR0_EL1
    orr     x18, x18, #(1 << TTBR_ASID_SHIFT)
    msr     TTBR0_EL1, x18

    /*
     * We eschew some barriers on Apple CPUs, as relative ordering of writes
     * to the TTBRs and writes to the TCR should be ensured by the
     * microarchitecture.
     */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
    isb     sy
#endif

    /*
     * Update the TCR to map the kernel now that we are using the kernel
     * ASID.
     */
    MOV64   x18, TCR_EL1_BOOT
    msr     TCR_EL1, x18
    isb     sy
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro
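/*
 * Illustrative sketch (not part of the build): under __ARM_KERNEL_PROTECT__
 * the task runs under two ASIDs, and the kernel mappings are only usable
 * with the odd ("kernel") ASID plus the boot-time TCR value. MAP_KERNEL is
 * roughly equivalent to this hypothetical C, with hypothetical helper names:
 *
 *   ttbr0 = read_ttbr0_el1();
 *   write_ttbr0_el1(ttbr0 | (1ULL << TTBR_ASID_SHIFT));  // select kernel ASID
 *   write_tcr_el1(TCR_EL1_BOOT);                         // kernel-visible TCR
 *   isb();
 *
 * The matching unmap sequence (user ASID, TCR_EL1_USER) appears on the
 * exception-return path near the end of this file.
 */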
/*
 * BRANCH_TO_KVA_VECTOR
 *
 * Branches to the requested long exception vector in the kernelcache.
 *   arg0 - The label to branch to
 *   arg1 - The index of the label in exc_vectors_table
 *
 * This may mutate x18.
 */
.macro BRANCH_TO_KVA_VECTOR
#if __ARM_KERNEL_PROTECT__
    /*
     * Find the kernelcache table for the exception vectors by accessing
     * the per-CPU data.
     */
    mrs     x18, TPIDR_EL1
    ldr     x18, [x18, ACT_CPUDATAP]
    ldr     x18, [x18, CPU_EXC_VECTORS]

    /*
     * Get the handler for this exception and jump to it.
     */
    ldr     x18, [x18, #($1 << 3)]
    br      x18
#else
    b       $0
#endif /* __ARM_KERNEL_PROTECT__ */
.endmacro

/*
 * CHECK_KERNEL_STACK
 *
 * Verifies that the kernel stack is aligned and mapped within an expected
 * stack address range. Note: happens before saving registers (in case we can't
 * save to kernel stack).
 *
 * Expects:
 *   {x0, x1} - saved
 *   x1 - Exception syndrome
 *   sp - Saved state
 *
 * Seems like we need an unused argument to the macro for the \@ syntax to work
 *
 */
.macro CHECK_KERNEL_STACK unused
    stp     x2, x3, [sp, #-16]!                 // Save {x2-x3}
    and     x1, x1, #ESR_EC_MASK                // Mask the exception class
    mov     x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
    cmp     x1, x2                              // If we have a stack alignment exception
    b.eq    Lcorrupt_stack_\@                   // ...the stack is definitely corrupted
    mov     x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
    cmp     x1, x2                              // If we have a data abort, we need to
    b.ne    Lvalid_stack_\@                     // ...validate the stack pointer
    mrs     x0, SP_EL0                          // Get SP_EL0
    mrs     x1, TPIDR_EL1                       // Get thread pointer
Ltest_kstack_\@:
    ldr     x2, [x1, TH_KSTACKPTR]              // Get top of kernel stack
    sub     x3, x2, KERNEL_STACK_SIZE           // Find bottom of kernel stack
    cmp     x0, x2                              // if (SP_EL0 >= kstack top)
    b.ge    Ltest_istack_\@                     //    jump to istack test
    cmp     x0, x3                              // if (SP_EL0 > kstack bottom)
    b.gt    Lvalid_stack_\@                     //    stack pointer valid
Ltest_istack_\@:
    ldr     x1, [x1, ACT_CPUDATAP]              // Load the cpu data ptr
    ldr     x2, [x1, CPU_INTSTACK_TOP]          // Get top of istack
    sub     x3, x2, INTSTACK_SIZE_NUM           // Find bottom of istack
    cmp     x0, x2                              // if (SP_EL0 >= istack top)
    b.ge    Lcorrupt_stack_\@                   //    corrupt stack pointer
    cmp     x0, x3                              // if (SP_EL0 > istack bottom)
    b.gt    Lvalid_stack_\@                     //    stack pointer valid
Lcorrupt_stack_\@:
    ldp     x2, x3, [sp], #16
    ldp     x0, x1, [sp], #16
    sub     sp, sp, ARM_CONTEXT_SIZE            // Allocate exception frame
    stp     x0, x1, [sp, SS64_X0]               // Save x0, x1 to the exception frame
    stp     x2, x3, [sp, SS64_X2]               // Save x2, x3 to the exception frame
    mrs     x0, SP_EL0                          // Get SP_EL0
    str     x0, [sp, SS64_SP]                   // Save sp to the exception frame
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    mov     x0, sp                              // Copy exception frame pointer to x0
    adrp    x1, fleh_invalid_stack@page         // Load address for fleh
    add     x1, x1, fleh_invalid_stack@pageoff  // fleh_dispatch64 will save register state before we get there
    b       fleh_dispatch64
Lvalid_stack_\@:
    ldp     x2, x3, [sp], #16                   // Restore {x2-x3}
.endmacro

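/*
 * Illustrative sketch (hypothetical C, not part of the build) of the bounds
 * test CHECK_KERNEL_STACK performs once the exception looks like a stack
 * problem (SP alignment fault or EL1 data abort):
 *
 *   bool sp_is_plausible(uint64_t sp, thread_t th, cpu_data_t *cpu)
 *   {
 *       uint64_t ktop = th->kstackptr;          // TH_KSTACKPTR
 *       uint64_t kbot = ktop - KERNEL_STACK_SIZE;
 *       uint64_t itop = cpu->intstack_top;      // CPU_INTSTACK_TOP
 *       uint64_t ibot = itop - INTSTACK_SIZE;
 *       return (sp > kbot && sp < ktop) || (sp > ibot && sp < itop);
 *   }
 *
 * If SP_EL0 lands outside both ranges, the macro hand-builds an exception
 * frame and dispatches to fleh_invalid_stack rather than trusting the stack.
 * The C field names above are illustrative; the assembly uses assym.s offsets.
 */
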
#if __ARM_KERNEL_PROTECT__
    .section __DATA_CONST,__const
    .align 3
    .globl EXT(exc_vectors_table)
LEXT(exc_vectors_table)
    /* Table of exception handlers.
     * These handlers sometimes contain deadloops.
     * It's nice to have symbols for them when debugging.
     */
    .quad el1_sp0_synchronous_vector_long
    .quad el1_sp0_irq_vector_long
    .quad el1_sp0_fiq_vector_long
    .quad el1_sp0_serror_vector_long
    .quad el1_sp1_synchronous_vector_long
    .quad el1_sp1_irq_vector_long
    .quad el1_sp1_fiq_vector_long
    .quad el1_sp1_serror_vector_long
    .quad el0_synchronous_vector_64_long
    .quad el0_irq_vector_64_long
    .quad el0_fiq_vector_64_long
    .quad el0_serror_vector_64_long
#endif /* __ARM_KERNEL_PROTECT__ */

    .text
#if __ARM_KERNEL_PROTECT__
    /*
     * We need this to be on a page boundary so that we can avoid mapping
     * other text along with it. As this must be on the VM page boundary
     * (due to how the coredumping code currently works), this will be a
     * 16KB page boundary.
     */
    .align 14
#else
    .align 12
#endif /* __ARM_KERNEL_PROTECT__ */
    .globl EXT(ExceptionVectorsBase)
LEXT(ExceptionVectorsBase)
Lel1_sp0_synchronous_vector:
    BRANCH_TO_KVA_VECTOR el1_sp0_synchronous_vector_long, 0

    .text
    .align 7
Lel1_sp0_irq_vector:
    BRANCH_TO_KVA_VECTOR el1_sp0_irq_vector_long, 1

    .text
    .align 7
Lel1_sp0_fiq_vector:
    BRANCH_TO_KVA_VECTOR el1_sp0_fiq_vector_long, 2

    .text
    .align 7
Lel1_sp0_serror_vector:
    BRANCH_TO_KVA_VECTOR el1_sp0_serror_vector_long, 3

    .text
    .align 7
Lel1_sp1_synchronous_vector:
    BRANCH_TO_KVA_VECTOR el1_sp1_synchronous_vector_long, 4

    .text
    .align 7
Lel1_sp1_irq_vector:
    BRANCH_TO_KVA_VECTOR el1_sp1_irq_vector_long, 5

    .text
    .align 7
Lel1_sp1_fiq_vector:
    BRANCH_TO_KVA_VECTOR el1_sp1_fiq_vector_long, 6

    .text
    .align 7
Lel1_sp1_serror_vector:
    BRANCH_TO_KVA_VECTOR el1_sp1_serror_vector_long, 7

    .text
    .align 7
Lel0_synchronous_vector_64:
    MAP_KERNEL
    BRANCH_TO_KVA_VECTOR el0_synchronous_vector_64_long, 8

    .text
    .align 7
Lel0_irq_vector_64:
    MAP_KERNEL
    BRANCH_TO_KVA_VECTOR el0_irq_vector_64_long, 9

    .text
    .align 7
Lel0_fiq_vector_64:
    MAP_KERNEL
    BRANCH_TO_KVA_VECTOR el0_fiq_vector_64_long, 10

    .text
    .align 7
Lel0_serror_vector_64:
    MAP_KERNEL
    BRANCH_TO_KVA_VECTOR el0_serror_vector_64_long, 11

    /* Fill out the rest of the page */
    .align 12

/*********************************
 * END OF EXCEPTION VECTORS PAGE *
 *********************************/

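/*
 * Layout note (descriptive only): VBAR_EL1 points at ExceptionVectorsBase,
 * and the architecture expects 16 vectors of 0x80 bytes each (hence the
 * ".align 7" between entries): four for "current EL with SP0", four for
 * "current EL with SPx", four for 64-bit lower EL, and four for 32-bit lower
 * EL. Only the first twelve are populated here; each short stub either
 * branches straight to its *_long handler or, under __ARM_KERNEL_PROTECT__,
 * looks the handler up in the per-CPU exc_vectors_table copy.
 */
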
.macro EL1_SP0_VECTOR
    msr     SPSel, #0                           // Switch to SP0
    sub     sp, sp, ARM_CONTEXT_SIZE            // Create exception frame
    stp     x0, x1, [sp, SS64_X0]               // Save x0, x1 to exception frame
    add     x0, sp, ARM_CONTEXT_SIZE            // Calculate the original stack pointer
    str     x0, [sp, SS64_SP]                   // Save stack pointer to exception frame
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    mov     x0, sp                              // Copy saved state pointer to x0
.endmacro

el1_sp0_synchronous_vector_long:
    stp     x0, x1, [sp, #-16]!                 // Save x0 and x1 to the exception stack
    mrs     x1, ESR_EL1                         // Get the exception syndrome
    /* If the stack pointer is corrupt, it will manifest either as a data abort
     * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
     * these quickly by testing bit 5 of the exception class.
     */
    tbz     x1, #(5 + ESR_EC_SHIFT), Lkernel_stack_valid
    CHECK_KERNEL_STACK
Lkernel_stack_valid:
    ldp     x0, x1, [sp], #16                   // Restore x0 and x1 from the exception stack
    EL1_SP0_VECTOR
    adrp    x1, EXT(fleh_synchronous)@page      // Load address for fleh
    add     x1, x1, EXT(fleh_synchronous)@pageoff
    b       fleh_dispatch64

el1_sp0_irq_vector_long:
    EL1_SP0_VECTOR
    SWITCH_TO_INT_STACK
    adrp    x1, EXT(fleh_irq)@page              // Load address for fleh
    add     x1, x1, EXT(fleh_irq)@pageoff
    b       fleh_dispatch64

el1_sp0_fiq_vector_long:
    // ARM64_TODO write optimized decrementer
    EL1_SP0_VECTOR
    SWITCH_TO_INT_STACK
    adrp    x1, EXT(fleh_fiq)@page              // Load address for fleh
    add     x1, x1, EXT(fleh_fiq)@pageoff
    b       fleh_dispatch64

el1_sp0_serror_vector_long:
    EL1_SP0_VECTOR
    adrp    x1, EXT(fleh_serror)@page           // Load address for fleh
    add     x1, x1, EXT(fleh_serror)@pageoff
    b       fleh_dispatch64

.macro EL1_SP1_VECTOR
    sub     sp, sp, ARM_CONTEXT_SIZE            // Create exception frame
    stp     x0, x1, [sp, SS64_X0]               // Save x0, x1 to exception frame
    add     x0, sp, ARM_CONTEXT_SIZE            // Calculate the original stack pointer
    str     x0, [sp, SS64_SP]                   // Save stack pointer to exception frame
    INIT_SAVED_STATE_FLAVORS sp, w0, w1
    mov     x0, sp                              // Copy saved state pointer to x0
.endmacro

el1_sp1_synchronous_vector_long:
    b       check_exception_stack
Lel1_sp1_synchronous_valid_stack:
#if defined(KERNEL_INTEGRITY_KTRR)
    b       check_ktrr_sctlr_trap
Lel1_sp1_synchronous_vector_continue:
#endif
    EL1_SP1_VECTOR
    adrp    x1, fleh_synchronous_sp1@page
    add     x1, x1, fleh_synchronous_sp1@pageoff
    b       fleh_dispatch64

el1_sp1_irq_vector_long:
    EL1_SP1_VECTOR
    adrp    x1, fleh_irq_sp1@page
    add     x1, x1, fleh_irq_sp1@pageoff
    b       fleh_dispatch64

el1_sp1_fiq_vector_long:
    EL1_SP1_VECTOR
    adrp    x1, fleh_fiq_sp1@page
    add     x1, x1, fleh_fiq_sp1@pageoff
    b       fleh_dispatch64

el1_sp1_serror_vector_long:
    EL1_SP1_VECTOR
    adrp    x1, fleh_serror_sp1@page
    add     x1, x1, fleh_serror_sp1@pageoff
    b       fleh_dispatch64

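/*
 * Illustrative sketch (hypothetical layout, not the real headers): the
 * SS64_* and NS64_* offsets used by these vectors index into a save area
 * shaped roughly like
 *
 *   struct arm_context {
 *       struct arm_saved_state64 ss;        // x0-x28, fp, lr, sp, pc, cpsr, far, esr
 *       struct arm_neon_saved_state64 ns;   // q0-q31, fpsr, fpcr
 *   };
 *
 * ARM_CONTEXT_SIZE is the size of that whole structure, which is why both
 * EL1_SP0_VECTOR and EL1_SP1_VECTOR carve exactly that much off the stack
 * before filling in SS64_X0 and SS64_SP.
 */
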
.macro EL0_64_VECTOR mode
    stp     x0, x1, [sp, #-16]!                 // Save x0 and x1 to the exception stack
#if __ARM_KERNEL_PROTECT__
    mov     x18, #0                             // Zero x18 to avoid leaking data to user SS
#endif
    mrs     x0, TPIDR_EL1                       // Load the thread register
    mrs     x1, SP_EL0                          // Load the user stack pointer
    add     x0, x0, ACT_CONTEXT                 // Calculate where we store the user context pointer
    ldr     x0, [x0]                            // Load the user context pointer
    str     x1, [x0, SS64_SP]                   // Store the user stack pointer in the user PCB
    msr     SP_EL0, x0                          // Copy the user PCB pointer to SP0
    ldp     x0, x1, [sp], #16                   // Restore x0 and x1 from the exception stack
    msr     SPSel, #0                           // Switch to SP0
    stp     x0, x1, [sp, SS64_X0]               // Save x0, x1 to the user PCB
    mrs     x1, TPIDR_EL1                       // Load the thread register


    mov     x0, sp                              // Copy the user PCB pointer to x0
    // x1 contains thread register
.endmacro


el0_synchronous_vector_64_long:
    EL0_64_VECTOR sync
    SWITCH_TO_KERN_STACK
    adrp    x1, EXT(fleh_synchronous)@page      // Load address for fleh
    add     x1, x1, EXT(fleh_synchronous)@pageoff
    b       fleh_dispatch64

el0_irq_vector_64_long:
    EL0_64_VECTOR irq
    SWITCH_TO_INT_STACK
    adrp    x1, EXT(fleh_irq)@page              // Load address for fleh
    add     x1, x1, EXT(fleh_irq)@pageoff
    b       fleh_dispatch64

el0_fiq_vector_64_long:
    EL0_64_VECTOR fiq
    SWITCH_TO_INT_STACK
    adrp    x1, EXT(fleh_fiq)@page              // Load address for fleh
    add     x1, x1, EXT(fleh_fiq)@pageoff
    b       fleh_dispatch64

el0_serror_vector_64_long:
    EL0_64_VECTOR serror
    SWITCH_TO_KERN_STACK
    adrp    x1, EXT(fleh_serror)@page           // Load address for fleh
    add     x1, x1, EXT(fleh_serror)@pageoff
    b       fleh_dispatch64


/*
 * check_exception_stack
 *
 * Verifies that the stack pointer at SP1 is within the exception stack.
 * If not, it simply hangs, as we have no more stack to fall back on.
 */

    .text
    .align 2
check_exception_stack:
    mrs     x18, TPIDR_EL1                      // Get thread pointer
    cbz     x18, Lvalid_exception_stack         // Thread context may not be set early in boot
    ldr     x18, [x18, ACT_CPUDATAP]
    cbz     x18, .                              // If thread context is set, cpu data should be too
    ldr     x18, [x18, CPU_EXCEPSTACK_TOP]
    cmp     sp, x18
    b.gt    .                                   // Hang if above exception stack top
    sub     x18, x18, EXCEPSTACK_SIZE_NUM       // Find bottom of exception stack
    cmp     sp, x18
    b.lt    .                                   // Hang if below exception stack bottom
Lvalid_exception_stack:
    mov     x18, #0
    b       Lel1_sp1_synchronous_valid_stack

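/*
 * Rough equivalent of check_exception_stack in hypothetical C (for reading
 * convenience only):
 *
 *   top = cpu->excepstack_top;                  // CPU_EXCEPSTACK_TOP
 *   if (sp > top || sp < top - EXCEPSTACK_SIZE)
 *       for (;;) ;                              // hang: nothing left to run on
 *
 * It deliberately clobbers only x18, since no register state has been saved
 * yet at this point, and it treats an unset thread pointer (early boot) as
 * valid.
 */
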
#if defined(KERNEL_INTEGRITY_KTRR)
    .text
    .align 2
check_ktrr_sctlr_trap:
/* We may abort on an instruction fetch on reset when enabling the MMU by
 * writing SCTLR_EL1 because the page containing the privileged instruction is
 * not executable at EL1 (due to KTRR). The abort happens only on SP1 which
 * would otherwise panic unconditionally. Check for the condition and return
 * safe execution to the caller on behalf of the faulting function.
 *
 * Expected register state:
 *  x22 - Kernel virtual base
 *  x23 - Kernel physical base
 */
    sub     sp, sp, ARM_CONTEXT_SIZE            // Make some space on the stack
    stp     x0, x1, [sp, SS64_X0]               // Stash x0, x1
    mrs     x0, ESR_EL1                         // Check ESR for instr. fetch abort
    and     x0, x0, #0xffffffffffffffc0         // Mask off ESR.ISS.IFSC
    movz    w1, #0x8600, lsl #16
    movk    w1, #0x0000
    cmp     x0, x1
    mrs     x0, ELR_EL1                         // Check for expected abort address
    adrp    x1, _pinst_set_sctlr_trap_addr@page
    add     x1, x1, _pinst_set_sctlr_trap_addr@pageoff
    sub     x1, x1, x22                         // Convert to physical address
    add     x1, x1, x23
    ccmp    x0, x1, #0, eq
    ldp     x0, x1, [sp, SS64_X0]               // Restore x0, x1
    add     sp, sp, ARM_CONTEXT_SIZE            // Clean up stack
    b.ne    Lel1_sp1_synchronous_vector_continue
    msr     ELR_EL1, lr                         // Return to caller
    ERET_CONTEXT_SYNCHRONIZING
#endif /* defined(KERNEL_INTEGRITY_KTRR) */

/* 64-bit first level exception handler dispatcher.
 * Completes register context saving and branches to FLEH.
 * Expects:
 *  {x0, x1, sp} - saved
 *  x0 - arm_context_t
 *  x1 - address of FLEH
 *  fp - previous stack frame if EL1
 *  lr - unused
 *  sp - kernel stack
 */
    .text
    .align 2
fleh_dispatch64:
    /* Save arm_saved_state64 */
    SPILL_REGISTERS KERNEL_MODE

    /* If exception is from userspace, zero unused registers */
    and     x23, x23, #(PSR64_MODE_EL_MASK)
    cmp     x23, #(PSR64_MODE_EL0)
    bne     1f

    SANITIZE_FPCR x25, x2, 2                    // x25 is set to current FPCR by SPILL_REGISTERS
2:
    mov     x2, #0
    mov     x3, #0
    mov     x4, #0
    mov     x5, #0
    mov     x6, #0
    mov     x7, #0
    mov     x8, #0
    mov     x9, #0
    mov     x10, #0
    mov     x11, #0
    mov     x12, #0
    mov     x13, #0
    mov     x14, #0
    mov     x15, #0
    mov     x16, #0
    mov     x17, #0
    mov     x18, #0
    mov     x19, #0
    mov     x20, #0
    /* x21, x22 cleared in common case below */
    mov     x23, #0
    mov     x24, #0
    mov     x25, #0
#if !XNU_MONITOR
    mov     x26, #0
#endif
    mov     x27, #0
    mov     x28, #0
    mov     fp, #0
    mov     lr, #0
1:

    mov     x21, x0                             // Copy arm_context_t pointer to x21
    mov     x22, x1                             // Copy handler routine to x22
#if HAS_APPLE_PAC
    pacia   x22, sp
#endif

#if XNU_MONITOR
    /* Zero x26 to indicate that this should not return to the PPL. */
    mov     x26, #0
#endif

#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
    tst     x23, PSR64_MODE_EL_MASK             // If any EL MODE bits are set, we're coming from
    b.ne    1f                                  // kernel mode, so skip precise time update
    PUSH_FRAME
    bl      EXT(timer_state_event_user_to_kernel)
    POP_FRAME
    mov     x0, x21                             // Reload arm_context_t pointer
1:
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

    /* Dispatch to FLEH */

#if HAS_APPLE_PAC
    braa    x22, sp
#else
    br      x22
#endif

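/*
 * Dispatch contract (descriptive note): by the time we branch through x22,
 * both x0 and x21 point at the saved arm_context_t and, under HAS_APPLE_PAC,
 * x22 has been signed with SP as the discriminator so a corrupted handler
 * pointer cannot be jumped to. Each fleh_* routine then reads ESR_EL1 and
 * FAR_EL1 itself and calls its sleh_* counterpart roughly as
 *
 *   sleh_xxx(state_in_x0, esr_in_x1, far_in_x2);
 *
 * (hypothetical prototype, shown only for orientation; see the C-side sleh
 * implementations for the real signatures).
 */
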
    .text
    .align 2
    .global EXT(fleh_synchronous)
LEXT(fleh_synchronous)

UNWIND_PROLOGUE
UNWIND_DIRECTIVES

    mrs     x1, ESR_EL1                         // Load exception syndrome
    mrs     x2, FAR_EL1                         // Load fault address

    /* At this point, the LR contains the value of ELR_EL1. In the case of an
     * instruction prefetch abort, this will be the faulting pc, which we know
     * to be invalid. This will prevent us from backtracing through the
     * exception if we put it in our stack frame, so we load the LR from the
     * exception saved state instead.
     */
    and     w3, w1, #(ESR_EC_MASK)
    lsr     w3, w3, #(ESR_EC_SHIFT)
    mov     w4, #(ESR_EC_IABORT_EL1)
    cmp     w3, w4
    b.eq    Lfleh_sync_load_lr
Lvalid_link_register:

    PUSH_FRAME
    bl      EXT(sleh_synchronous)
    POP_FRAME

#if XNU_MONITOR
    CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

    mov     x28, xzr                            // Don't need to check PFZ if there are ASTs
    b       exception_return_dispatch

Lfleh_sync_load_lr:
    ldr     lr, [x0, SS64_LR]
    b       Lvalid_link_register
UNWIND_EPILOGUE

/* Shared prologue code for fleh_irq and fleh_fiq.
 * Does any interrupt bookkeeping we may want to do
 * before invoking the handler proper.
 * Expects:
 *  x0 - arm_context_t
 *  x23 - CPSR
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro BEGIN_INTERRUPT_HANDLER
    mrs     x22, TPIDR_EL1
    ldr     x23, [x22, ACT_CPUDATAP]            // Get current cpu
    /* Update IRQ count; CPU_STAT_IRQ.* is required to be accurate for the WFE idle sequence */
    ldr     w1, [x23, CPU_STAT_IRQ]
    add     w1, w1, #1                          // Increment count
    str     w1, [x23, CPU_STAT_IRQ]             // Update IRQ count
    ldr     w1, [x23, CPU_STAT_IRQ_WAKE]
    add     w1, w1, #1                          // Increment count
    str     w1, [x23, CPU_STAT_IRQ_WAKE]        // Update post-wake IRQ count
    /* Increment preempt count */
    ldr     w1, [x22, ACT_PREEMPT_CNT]
    add     w1, w1, #1
    str     w1, [x22, ACT_PREEMPT_CNT]
    /* Store context in int state */
    str     x0, [x23, CPU_INT_STATE]            // Saved context in cpu_int_state
.endmacro

/* Shared epilogue code for fleh_irq and fleh_fiq.
 * Cleans up after the prologue, and may do a bit more
 * bookkeeping (kdebug related).
 * Expects:
 *  x22 - Live TPIDR_EL1 value (thread address)
 *  x23 - Address of the current CPU data structure
 *  w24 - 0 if kdebug is disabled, nonzero otherwise
 *  fp - Undefined live value (we may push a frame)
 *  lr - Undefined live value (we may push a frame)
 *  sp - Interrupt stack for the current CPU
 */
.macro END_INTERRUPT_HANDLER
    /* Clear int context */
    str     xzr, [x23, CPU_INT_STATE]
    /* Decrement preempt count */
    ldr     w0, [x22, ACT_PREEMPT_CNT]
    cbnz    w0, 1f                              // Detect underflow
    b       preempt_underflow
1:
    sub     w0, w0, #1
    str     w0, [x22, ACT_PREEMPT_CNT]
    /* Switch back to kernel stack */
    ldr     x0, [x22, TH_KSTACKPTR]
    mov     sp, x0
    /* Generate a CPU-local event to terminate a post-IRQ WFE */
    sevl
.endmacro

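/*
 * Rough shape of the interrupt prologue/epilogue above in hypothetical C
 * (illustration only; field names mirror the assym.s offsets used above):
 *
 *   // BEGIN_INTERRUPT_HANDLER
 *   cpu->stat_irq++;  cpu->stat_irq_wake++;    // WFE idle sequence reads these
 *   thread->preempt_count++;                   // no preemption inside the handler
 *   cpu->int_state = state;                    // saved context for debug/backtrace
 *
 *   // END_INTERRUPT_HANDLER
 *   cpu->int_state = NULL;
 *   if (thread->preempt_count == 0) preempt_underflow_panic();
 *   thread->preempt_count--;
 *   sp = thread->kstackptr;                    // back onto the kernel stack
 *   sevl();                                    // wake a post-IRQ WFE waiter
 */
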
    .text
    .align 2
    .global EXT(fleh_irq)
LEXT(fleh_irq)
UNWIND_PROLOGUE
UNWIND_DIRECTIVES
    BEGIN_INTERRUPT_HANDLER
    PUSH_FRAME
    bl      EXT(sleh_irq)
    POP_FRAME
    END_INTERRUPT_HANDLER

#if XNU_MONITOR
    CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

    mov     x28, #1                             // Set a bit to check PFZ if there are ASTs
    b       exception_return_dispatch
UNWIND_EPILOGUE

    .text
    .align 2
    .global EXT(fleh_fiq_generic)
LEXT(fleh_fiq_generic)
    PANIC_UNIMPLEMENTED

    .text
    .align 2
    .global EXT(fleh_fiq)
LEXT(fleh_fiq)
UNWIND_PROLOGUE
UNWIND_DIRECTIVES
    BEGIN_INTERRUPT_HANDLER
    PUSH_FRAME
    bl      EXT(sleh_fiq)
    POP_FRAME
    END_INTERRUPT_HANDLER

#if XNU_MONITOR
    CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

    mov     x28, #1                             // Set a bit to check PFZ if there are ASTs
    b       exception_return_dispatch
UNWIND_EPILOGUE

    .text
    .align 2
    .global EXT(fleh_serror)
LEXT(fleh_serror)
UNWIND_PROLOGUE
UNWIND_DIRECTIVES
    mrs     x1, ESR_EL1                         // Load exception syndrome
    mrs     x2, FAR_EL1                         // Load fault address

    PUSH_FRAME
    bl      EXT(sleh_serror)
    POP_FRAME

#if XNU_MONITOR
    CHECK_EXCEPTION_RETURN_DISPATCH_PPL
#endif

    mov     x28, xzr                            // Don't need to check PFZ if there are ASTs
    b       exception_return_dispatch
UNWIND_EPILOGUE

/*
 * Register state saved before we get here.
 */
    .text
    .align 2
fleh_invalid_stack:
    mrs     x1, ESR_EL1                         // Load exception syndrome
    str     x1, [x0, SS64_ESR]
    mrs     x2, FAR_EL1                         // Load fault address
    str     x2, [x0, SS64_FAR]
    PUSH_FRAME
    bl      EXT(sleh_invalid_stack)             // Shouldn't return!
    b       .

    .text
    .align 2
fleh_synchronous_sp1:
    mrs     x1, ESR_EL1                         // Load exception syndrome
    str     x1, [x0, SS64_ESR]
    mrs     x2, FAR_EL1                         // Load fault address
    str     x2, [x0, SS64_FAR]
    PUSH_FRAME
    bl      EXT(sleh_synchronous_sp1)
    b       .

    .text
    .align 2
fleh_irq_sp1:
    mov     x1, x0
    adr     x0, Lsp1_irq_str
    b       EXT(panic_with_thread_kernel_state)
Lsp1_irq_str:
    .asciz "IRQ exception taken while SP1 selected"

    .text
    .align 2
fleh_fiq_sp1:
    mov     x1, x0
    adr     x0, Lsp1_fiq_str
    b       EXT(panic_with_thread_kernel_state)
Lsp1_fiq_str:
    .asciz "FIQ exception taken while SP1 selected"

    .text
    .align 2
fleh_serror_sp1:
    mov     x1, x0
    adr     x0, Lsp1_serror_str
    b       EXT(panic_with_thread_kernel_state)
Lsp1_serror_str:
    .asciz "Asynchronous exception taken while SP1 selected"

    .text
    .align 2
exception_return_dispatch:
    ldr     w0, [x21, SS64_CPSR]
    tst     w0, PSR64_MODE_EL_MASK
    b.ne    EXT(return_to_kernel)               // return to kernel if M[3:2] > 0
    b       return_to_user

    .text
    .align 2
    .global EXT(return_to_kernel)
LEXT(return_to_kernel)
    tbnz    w0, #DAIF_IRQF_SHIFT, exception_return  // Skip AST check if IRQ disabled
    mrs     x3, TPIDR_EL1                       // Load thread pointer
    ldr     w1, [x3, ACT_PREEMPT_CNT]           // Load preemption count
    msr     DAIFSet, #DAIFSC_ALL                // Disable exceptions
    cbnz    x1, exception_return_unint_tpidr_x3 // If preemption disabled, skip AST check
    ldr     x1, [x3, ACT_CPUDATAP]              // Get current CPU data pointer
    ldr     x2, [x1, CPU_PENDING_AST]           // Get ASTs
    tst     x2, AST_URGENT                      // If no urgent ASTs, skip ast_taken
    b.eq    exception_return_unint_tpidr_x3
    mov     sp, x21                             // Switch to thread stack for preemption
    PUSH_FRAME
    bl      EXT(ast_taken_kernel)               // Handle AST_URGENT
    POP_FRAME
    b       exception_return

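/*
 * return_to_kernel's AST gate, sketched in hypothetical C (illustration only):
 *
 *   if (irqs_were_enabled(saved_cpsr) &&
 *       thread->preempt_count == 0 &&
 *       (cpu->pending_ast & AST_URGENT)) {
 *           sp = saved_state;        // thread stack, not the interrupt stack
 *           ast_taken_kernel();      // may preempt here
 *   }
 *   exception_return();
 *
 * Interrupts are masked (DAIFSet) before the pending-AST word is examined so
 * the decision and the eventual eret cannot race a newly arriving interrupt.
 */
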
    .text
    .globl EXT(thread_bootstrap_return)
LEXT(thread_bootstrap_return)
#if CONFIG_DTRACE
    bl      EXT(dtrace_thread_bootstrap)
#endif
    b       EXT(arm64_thread_exception_return)

    .text
    .globl EXT(arm64_thread_exception_return)
LEXT(arm64_thread_exception_return)
    mrs     x0, TPIDR_EL1
    add     x21, x0, ACT_CONTEXT
    ldr     x21, [x21]
    mov     x28, xzr

    //
    // Fall Through to return_to_user from arm64_thread_exception_return.
    // Note that if we move return_to_user or insert a new routine
    // below arm64_thread_exception_return, the latter will need to change.
    //
    .text
/* x21 is always the machine context pointer when we get here
 * x28 is a bit indicating whether or not we should check if pc is in pfz */
return_to_user:
check_user_asts:
    mrs     x3, TPIDR_EL1                       // Load thread pointer

    movn    w2, #0
    str     w2, [x3, TH_IOTIER_OVERRIDE]        // Reset IO tier override to -1 before returning to user

#if MACH_ASSERT
    ldr     w0, [x3, ACT_PREEMPT_CNT]
    cbnz    w0, preempt_count_notzero           // Detect unbalanced enable/disable preemption
#endif

    msr     DAIFSet, #DAIFSC_ALL                // Disable exceptions
    ldr     x4, [x3, ACT_CPUDATAP]              // Get current CPU data pointer
    ldr     x0, [x4, CPU_PENDING_AST]           // Get ASTs
    cbz     x0, no_asts                         // If no asts, skip ahead

    cbz     x28, user_take_ast                  // If we don't need to check PFZ, just handle asts

    /* At this point, we have ASTs and we need to check whether we are running in the
     * preemption free zone (PFZ) or not. No ASTs are handled if we are running in
     * the PFZ since we don't want to handle getting a signal or getting suspended
     * while holding a spinlock in userspace.
     *
     * If userspace was in the PFZ, we know (via coordination with the PFZ code
     * in commpage_asm.s) that it will not be using x15 and it is therefore safe
     * to use it to indicate to userspace to come back to take a delayed
     * preemption, at which point the ASTs will be handled. */
    mov     x28, xzr                            // Clear the "check PFZ" bit so that we don't do this again
    mov     x19, x0                             // Save x0 since it will be clobbered by commpage_is_in_pfz64

    ldr     x0, [x21, SS64_PC]                  // Load pc from machine state
    bl      EXT(commpage_is_in_pfz64)           // pc in pfz?
    cbz     x0, restore_and_check_ast           // No, deal with other asts

    mov     x0, #1
    str     x0, [x21, SS64_X15]                 // Mark x15 for userspace to take delayed preemption
    mov     x0, x19                             // restore x0 to asts
    b       no_asts                             // pretend we have no asts

restore_and_check_ast:
    mov     x0, x19                             // restore x0
    b       user_take_ast                       // Service pending asts
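/*
 * The PFZ dance above, in hypothetical C (illustration only):
 *
 *   if (asts_pending) {
 *       if (check_pfz && commpage_is_in_pfz64(saved_state->pc)) {
 *           saved_state->x15 = 1;   // ask userspace to come back for a delayed
 *                                   // preemption once it leaves the PFZ
 *       } else {
 *           ast_taken_user();       // take signals/suspension/preemption now
 *       }
 *   }
 *
 * x15 is safe to use as the flag because the commpage PFZ code promises not
 * to use it while inside the preemption-free zone.
 */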
no_asts:


#if !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT
    mov     x19, x3                             // Preserve thread pointer across function call
    PUSH_FRAME
    bl      EXT(timer_state_event_kernel_to_user)
    POP_FRAME
    mov     x3, x19
#endif /* !CONFIG_SKIP_PRECISE_USER_KERNEL_TIME || HAS_FAST_CNTVCT */

#if (CONFIG_KERNEL_INTEGRITY && KERNEL_INTEGRITY_WT)
    /* Watchtower
     *
     * Here we attempt to enable NEON access for EL0. If the last entry into the
     * kernel from user-space was due to an IRQ, the monitor will have disabled
     * NEON for EL0 _and_ access to CPACR_EL1 from EL1 (1). This forces xnu to
     * check in with the monitor in order to reenable NEON for EL0 in exchange
     * for routing IRQs through the monitor (2). This way the monitor will
     * always 'own' either IRQs or EL0 NEON.
     *
     * If Watchtower is disabled or we did not enter the kernel through an IRQ
     * (e.g. FIQ or syscall) this is a no-op, otherwise we will trap to EL3
     * here.
     *
     * EL0 user ________ IRQ                                        ______
     * EL1 xnu           \   ______________________ CPACR_EL1   __/
     * EL3 monitor           \_/                             \___/
     *
     *                       (1)                             (2)
     */

    mov     x0, #(CPACR_FPEN_ENABLE)
    msr     CPACR_EL1, x0
#endif

    /* Establish this thread's debug state as the live state on the selected CPU. */
    ldr     x4, [x3, ACT_CPUDATAP]              // Get current CPU data pointer
    ldr     x1, [x4, CPU_USER_DEBUG]            // Get Debug context
    ldr     x0, [x3, ACT_DEBUGDATA]
    cmp     x0, x1
    beq     L_skip_user_set_debug_state         // Skip if the live CPU debug state already matches the thread's


    PUSH_FRAME
    bl      EXT(arm_debug_set)                  // Establish thread debug state in live regs
    POP_FRAME
    mrs     x3, TPIDR_EL1                       // Reload thread pointer
    ldr     x4, [x3, ACT_CPUDATAP]              // Reload CPU data pointer
L_skip_user_set_debug_state:
    ldrsh   x0, [x4, CPU_NUMBER_GS]
    msr     TPIDR_EL0, x0


    b       exception_return_unint_tpidr_x3

    //
    // Fall through from return_to_user to exception_return.
    // Note that if we move exception_return or add a new routine below
    // return_to_user, the latter will have to change.
    //

exception_return:
    msr     DAIFSet, #DAIFSC_ALL                // Disable exceptions
exception_return_unint:
    mrs     x3, TPIDR_EL1                       // Load thread pointer
exception_return_unint_tpidr_x3:
    mov     sp, x21                             // Reload the pcb pointer

exception_return_unint_tpidr_x3_dont_trash_x18:


#if __ARM_KERNEL_PROTECT__
    /*
     * If we are going to eret to userspace, we must return through the EL0
     * eret mapping.
     */
    ldr     w1, [sp, SS64_CPSR]                 // Load CPSR
    tbnz    w1, PSR64_MODE_EL_SHIFT, Lskip_el0_eret_mapping // Skip if returning to EL1

    /* We need to switch to the EL0 mapping of this code to eret to EL0. */
    adrp    x0, EXT(ExceptionVectorsBase)@page  // Load vector base
    adrp    x1, Lexception_return_restore_registers@page // Load target PC
    add     x1, x1, Lexception_return_restore_registers@pageoff
    MOV64   x2, ARM_KERNEL_PROTECT_EXCEPTION_START // Load EL0 vector address
    sub     x1, x1, x0                          // Calculate delta
    add     x0, x2, x1                          // Convert KVA to EL0 vector address
    br      x0

Lskip_el0_eret_mapping:
#endif /* __ARM_KERNEL_PROTECT__ */

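/*
 * Address math used just above, in hypothetical C (illustration only): the
 * register-restore code is mapped twice, once at its normal kernel VA and
 * once inside the user-visible exception vector window, so to eret to EL0 we
 * continue at
 *
 *   el0_alias = ARM_KERNEL_PROTECT_EXCEPTION_START
 *             + (&Lexception_return_restore_registers - &ExceptionVectorsBase);
 *
 * i.e. the same offset within the vector region, rebased onto the alias that
 * remains mapped after the kernel is unmapped below.
 */
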
Lexception_return_restore_registers:
    mov     x0, sp                              // x0 = &pcb
    // Loads authed $x0->ss_64.pc into x1 and $x0->ss_64.cpsr into w2
    AUTH_THREAD_STATE_IN_X0 x20, x21, x22, x23, x24, el0_state_allowed=1

/* Restore special register state */
    ldr     w3, [sp, NS64_FPSR]
    ldr     w4, [sp, NS64_FPCR]

    msr     ELR_EL1, x1                         // Load the return address into ELR
    msr     SPSR_EL1, x2                        // Load the return CPSR into SPSR
    msr     FPSR, x3
    mrs     x5, FPCR
    CMSR FPCR, x5, x4, 1
1:


    /* Restore arm_neon_saved_state64 */
    ldp     q0, q1, [x0, NS64_Q0]
    ldp     q2, q3, [x0, NS64_Q2]
    ldp     q4, q5, [x0, NS64_Q4]
    ldp     q6, q7, [x0, NS64_Q6]
    ldp     q8, q9, [x0, NS64_Q8]
    ldp     q10, q11, [x0, NS64_Q10]
    ldp     q12, q13, [x0, NS64_Q12]
    ldp     q14, q15, [x0, NS64_Q14]
    ldp     q16, q17, [x0, NS64_Q16]
    ldp     q18, q19, [x0, NS64_Q18]
    ldp     q20, q21, [x0, NS64_Q20]
    ldp     q22, q23, [x0, NS64_Q22]
    ldp     q24, q25, [x0, NS64_Q24]
    ldp     q26, q27, [x0, NS64_Q26]
    ldp     q28, q29, [x0, NS64_Q28]
    ldp     q30, q31, [x0, NS64_Q30]

    /* Restore arm_saved_state64 */

    // Skip x0, x1 - we're using them
    ldp     x2, x3, [x0, SS64_X2]
    ldp     x4, x5, [x0, SS64_X4]
    ldp     x6, x7, [x0, SS64_X6]
    ldp     x8, x9, [x0, SS64_X8]
    ldp     x10, x11, [x0, SS64_X10]
    ldp     x12, x13, [x0, SS64_X12]
    ldp     x14, x15, [x0, SS64_X14]
    // Skip x16, x17 - already loaded + authed by AUTH_THREAD_STATE_IN_X0
    ldp     x18, x19, [x0, SS64_X18]
    ldp     x20, x21, [x0, SS64_X20]
    ldp     x22, x23, [x0, SS64_X22]
    ldp     x24, x25, [x0, SS64_X24]
    ldp     x26, x27, [x0, SS64_X26]
    ldr     x28, [x0, SS64_X28]
    ldr     fp, [x0, SS64_FP]
    // Skip lr - already loaded + authed by AUTH_THREAD_STATE_IN_X0

    // Restore stack pointer and our last two GPRs
    ldr     x1, [x0, SS64_SP]
    mov     sp, x1

#if __ARM_KERNEL_PROTECT__
    ldr     w18, [x0, SS64_CPSR]                // Stash CPSR
#endif /* __ARM_KERNEL_PROTECT__ */

    ldp     x0, x1, [x0, SS64_X0]               // Restore the GPRs

#if __ARM_KERNEL_PROTECT__
    /* If we are going to eret to userspace, we must unmap the kernel. */
    tbnz    w18, PSR64_MODE_EL_SHIFT, Lskip_ttbr1_switch

    /* Update TCR to unmap the kernel. */
    MOV64   x18, TCR_EL1_USER
    msr     TCR_EL1, x18

    /*
     * On Apple CPUs, TCR writes and TTBR writes should be ordered relative to
     * each other due to the microarchitecture.
     */
#if !defined(APPLE_ARM64_ARCH_FAMILY)
    isb     sy
#endif

    /* Switch to the user ASID (low bit clear) for the task. */
    mrs     x18, TTBR0_EL1
    bic     x18, x18, #(1 << TTBR_ASID_SHIFT)
    msr     TTBR0_EL1, x18
    mov     x18, #0

    /* We don't need an ISB here, as the eret is synchronizing. */
Lskip_ttbr1_switch:
#endif /* __ARM_KERNEL_PROTECT__ */

    ERET_CONTEXT_SYNCHRONIZING

user_take_ast:
    PUSH_FRAME
    bl      EXT(ast_taken_user)                 // Handle all ASTs, may return via continuation
    POP_FRAME
    b       check_user_asts                     // Now try again

    .text
    .align 2
preempt_underflow:
    mrs     x0, TPIDR_EL1
    str     x0, [sp, #-16]!                     // We'll print thread pointer
    adr     x0, L_underflow_str                 // Format string
    CALL_EXTERN panic                           // Game over

L_underflow_str:
    .asciz "Preemption count negative on thread %p"
.align 2

#if MACH_ASSERT
    .text
    .align 2
preempt_count_notzero:
    mrs     x0, TPIDR_EL1
    str     x0, [sp, #-16]!                     // We'll print thread pointer
    ldr     w0, [x0, ACT_PREEMPT_CNT]
    str     w0, [sp, #8]
    adr     x0, L_preempt_count_notzero_str     // Format string
    CALL_EXTERN panic                           // Game over

L_preempt_count_notzero_str:
    .asciz "preemption count not 0 on thread %p (%u)"
#endif /* MACH_ASSERT */

#if __ARM_KERNEL_PROTECT__
    /*
     * This symbol denotes the end of the exception vector/eret range; we page
     * align it so that we can avoid mapping other text in the EL0 exception
     * vector mapping.
     */
    .text
    .align 14
    .globl EXT(ExceptionVectorsEnd)
LEXT(ExceptionVectorsEnd)
#endif /* __ARM_KERNEL_PROTECT__ */

#if XNU_MONITOR

/*
 * Functions to preflight the fleh handlers when the PPL has taken an exception;
 * mostly concerned with setting up state for the normal fleh code.
 */
    .text
    .align 2
fleh_synchronous_from_ppl:
    /* Save x0. */
    mov     x15, x0

    /* Grab the ESR. */
    mrs     x1, ESR_EL1                         // Get the exception syndrome

    /* If the stack pointer is corrupt, it will manifest either as a data abort
     * (syndrome 0x25) or a misaligned pointer (syndrome 0x26). We can check
     * these quickly by testing bit 5 of the exception class.
     */
    tbz     x1, #(5 + ESR_EC_SHIFT), Lvalid_ppl_stack
    mrs     x0, SP_EL0                          // Get SP_EL0

    /* Perform high level checks for stack corruption. */
    and     x1, x1, #ESR_EC_MASK                // Mask the exception class
    mov     x2, #(ESR_EC_SP_ALIGN << ESR_EC_SHIFT)
    cmp     x1, x2                              // If we have a stack alignment exception
    b.eq    Lcorrupt_ppl_stack                  // ...the stack is definitely corrupted
    mov     x2, #(ESR_EC_DABORT_EL1 << ESR_EC_SHIFT)
    cmp     x1, x2                              // If we have a data abort, we need to
    b.ne    Lvalid_ppl_stack                    // ...validate the stack pointer

Ltest_pstack:
    /* Bounds check the PPL stack. */
    adrp    x10, EXT(pmap_stacks_start)@page
    ldr     x10, [x10, #EXT(pmap_stacks_start)@pageoff]
    adrp    x11, EXT(pmap_stacks_end)@page
    ldr     x11, [x11, #EXT(pmap_stacks_end)@pageoff]
    cmp     x0, x10
    b.lo    Lcorrupt_ppl_stack
    cmp     x0, x11
    b.hi    Lcorrupt_ppl_stack

Lvalid_ppl_stack:
    /* Restore x0. */
    mov     x0, x15

    /* Switch back to the kernel stack. */
    msr     SPSel, #0
    GET_PMAP_CPU_DATA x5, x6, x7
    ldr     x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
    mov     sp, x6

    /* Hand off to the synch handler. */
    b       EXT(fleh_synchronous)

Lcorrupt_ppl_stack:
    /* Restore x0. */
    mov     x0, x15

    /* Hand off to the invalid stack handler. */
    b       fleh_invalid_stack

fleh_fiq_from_ppl:
    SWITCH_TO_INT_STACK
    b       EXT(fleh_fiq)

fleh_irq_from_ppl:
    SWITCH_TO_INT_STACK
    b       EXT(fleh_irq)

fleh_serror_from_ppl:
    GET_PMAP_CPU_DATA x5, x6, x7
    ldr     x6, [x5, PMAP_CPU_DATA_KERN_SAVED_SP]
    mov     sp, x6
    b       EXT(fleh_serror)



    // x15: ppl call number
    // w10: ppl_state
    // x20: gxf_enter caller's DAIF
    .globl EXT(ppl_trampoline_start)
LEXT(ppl_trampoline_start)


#error "XPRR configuration error"
    cmp     x14, x21
    b.ne    Lppl_fail_dispatch

    /* Verify the request ID. */
    cmp     x15, PMAP_COUNT
    b.hs    Lppl_fail_dispatch

    GET_PMAP_CPU_DATA x12, x13, x14

    /* Mark this CPU as being in the PPL. */
    ldr     w9, [x12, PMAP_CPU_DATA_PPL_STATE]

    cmp     w9, #PPL_STATE_KERNEL
    b.eq    Lppl_mark_cpu_as_dispatching

    /* Check to see if we are trying to trap from within the PPL. */
    cmp     w9, #PPL_STATE_DISPATCH
    b.eq    Lppl_fail_dispatch_ppl


    /* Ensure that we are returning from an exception. */
    cmp     w9, #PPL_STATE_EXCEPTION
    b.ne    Lppl_fail_dispatch

    // w10 is set in CHECK_EXCEPTION_RETURN_DISPATCH_PPL
    cmp     w10, #PPL_STATE_EXCEPTION
    b.ne    Lppl_fail_dispatch

    /* This is an exception return; set the CPU to the dispatching state. */
    mov     w9, #PPL_STATE_DISPATCH
    str     w9, [x12, PMAP_CPU_DATA_PPL_STATE]

    /* Find the save area, and return to the saved PPL context. */
    ldr     x0, [x12, PMAP_CPU_DATA_SAVE_AREA]
    mov     sp, x0
    b       EXT(return_to_ppl)

Lppl_mark_cpu_as_dispatching:
    cmp     w10, #PPL_STATE_KERNEL
    b.ne    Lppl_fail_dispatch

    /* Mark the CPU as dispatching. */
    mov     w13, #PPL_STATE_DISPATCH
    str     w13, [x12, PMAP_CPU_DATA_PPL_STATE]

    /* Switch to the regular PPL stack. */
    // TODO: switch to PPL_STACK earlier in gxf_ppl_entry_handler
    ldr     x9, [x12, PMAP_CPU_DATA_PPL_STACK]

    // SP0 is thread stack here
    mov     x21, sp
    // SP0 is now PPL stack
    mov     sp, x9

    /* Save the old stack pointer off in case we need it. */
    str     x21, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

    /* Get the handler for the request */
    adrp    x9, EXT(ppl_handler_table)@page
    add     x9, x9, EXT(ppl_handler_table)@pageoff
    add     x9, x9, x15, lsl #3
    ldr     x10, [x9]

    /* Branch to the code that will invoke the PPL request. */
    b       EXT(ppl_dispatch)

Lppl_fail_dispatch_ppl:
    /* Switch back to the kernel stack. */
    ldr     x10, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]
    mov     sp, x10

Lppl_fail_dispatch:
    /* Indicate that we failed. */
    mov     x15, #PPL_EXIT_BAD_CALL

    /* Move the DAIF bits into the expected register. */
    mov     x10, x20

    /* Return to kernel mode. */
    b       ppl_return_to_kernel_mode

Lppl_dispatch_exit:
    /* Indicate that we are cleanly exiting the PPL. */
    mov     x15, #PPL_EXIT_DISPATCH

    /* Switch back to the original (kernel thread) stack. */
    mov     sp, x21

    /* Move the saved DAIF bits. */
    mov     x10, x20

    /* Clear the in-flight pmap pointer */
    add     x13, x12, PMAP_CPU_DATA_INFLIGHT_PMAP
    stlr    xzr, [x13]

    /* Clear the old stack pointer. */
    str     xzr, [x12, PMAP_CPU_DATA_KERN_SAVED_SP]

    /*
     * Mark the CPU as no longer being in the PPL. We spin if our state
     * machine is broken.
     */
    ldr     w9, [x12, PMAP_CPU_DATA_PPL_STATE]
    cmp     w9, #PPL_STATE_DISPATCH
    b.ne    .
    mov     w9, #PPL_STATE_KERNEL
    str     w9, [x12, PMAP_CPU_DATA_PPL_STATE]

    /* Return to the kernel. */
    b       ppl_return_to_kernel_mode


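/*
 * PPL entry state machine, sketched in hypothetical C (illustration only;
 * the real checks are the w9/w10 comparisons above):
 *
 *   switch (cpu->ppl_state) {                  // PMAP_CPU_DATA_PPL_STATE
 *   case PPL_STATE_KERNEL:                     // fresh call from the kernel
 *       cpu->ppl_state = PPL_STATE_DISPATCH;
 *       switch_to_ppl_stack();
 *       ppl_dispatch(handler_table[call_number]);
 *       break;
 *   case PPL_STATE_EXCEPTION:                  // returning after an exception
 *       cpu->ppl_state = PPL_STATE_DISPATCH;
 *       resume_saved_ppl_context();
 *       break;
 *   case PPL_STATE_DISPATCH:                   // PPL trapped into itself
 *   default:
 *       fail(PPL_EXIT_BAD_CALL);
 *   }
 */
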
    .text
ppl_exit:
    /*
     * If we are dealing with an exception, hand off to the first level
     * exception handler.
     */
    cmp     x15, #PPL_EXIT_EXCEPTION
    b.eq    Ljump_to_fleh_handler

    /* If this was a panic call from the PPL, reinvoke panic. */
    cmp     x15, #PPL_EXIT_PANIC_CALL
    b.eq    Ljump_to_panic_trap_to_debugger

    /*
     * Stash off the original DAIF in the high bits of the exit code register.
     * We could keep this in a dedicated register, but that would require us to copy it to
     * an additional callee-save register below (e.g. x22), which in turn would require that
     * register to be saved/restored at PPL entry/exit.
     */
    add     x15, x15, x10, lsl #32

    /* Load the preemption count. */
    mrs     x10, TPIDR_EL1
    ldr     w12, [x10, ACT_PREEMPT_CNT]

    /* Detect underflow */
    cbnz    w12, Lno_preempt_underflow
    b       preempt_underflow
Lno_preempt_underflow:

    /* Lower the preemption count. */
    sub     w12, w12, #1

#if SCHED_PREEMPTION_DISABLE_DEBUG
    /* Collect preemption disable measurement if necessary. */

    /* Only collect measurement if this reenabled preemption. */
    cmp     w12, #0
    b.ne    Lskip_collect_measurement

    /* Only collect measurement if a start time was set. */
    ldr     x14, [x10, ACT_PREEMPT_ADJ_MT]
    cmp     x14, #0
    b.eq    Lskip_collect_measurement

    /* Stash our return value and return reason. */
    mov     x20, x0
    mov     x21, x15

    /* Collect measurement. */
    mov     x0, x10
    bl      EXT(_collect_preemption_disable_measurement)

    /* Restore the return value and the return reason. */
    mov     x0, x20
    mov     x15, x21
    /* ... and w12, which was 0. */
    mov     w12, #0

    /* Restore the thread pointer into x10. */
    mrs     x10, TPIDR_EL1

Lskip_collect_measurement:
#endif /* SCHED_PREEMPTION_DISABLE_DEBUG */

    /* Save the lowered preemption count. */
    str     w12, [x10, ACT_PREEMPT_CNT]

    /* Skip ASTs if the preemption count is not zero. */
    cbnz    x12, Lppl_skip_ast_taken

    /*
     * Skip the AST check if interrupts were originally disabled.
     * The original DAIF state prior to PPL entry is stored in the upper
     * 32 bits of x15.
     */
    tbnz    x15, #(DAIF_IRQF_SHIFT + 32), Lppl_skip_ast_taken

    /* If there is no urgent AST, skip the AST. */
    ldr     x12, [x10, ACT_CPUDATAP]
    ldr     x14, [x12, CPU_PENDING_AST]
    tst     x14, AST_URGENT
    b.eq    Lppl_skip_ast_taken

    /* Stash our return value and return reason. */
    mov     x20, x0
    mov     x21, x15

    /* Handle the AST. */
    bl      EXT(ast_taken_kernel)

    /* Restore the return value and the return reason. */
    mov     x15, x21
    mov     x0, x20

Lppl_skip_ast_taken:

    /* Extract caller DAIF from high-order bits of exit code */
    ubfx    x10, x15, #32, #32
    bfc     x15, #32, #32
    msr     DAIF, x10

    /* Pop the stack frame. */
    ldp     x29, x30, [sp, #0x10]
    ldp     x20, x21, [sp], #0x20

    /* Check to see if this was a bad request. */
    cmp     x15, #PPL_EXIT_BAD_CALL
    b.eq    Lppl_bad_call

    /* Return. */
    ARM64_STACK_EPILOG

    .align 2
Ljump_to_fleh_handler:
    br      x25

    .align 2
Ljump_to_panic_trap_to_debugger:
    b       EXT(panic_trap_to_debugger)

Lppl_bad_call:
    /* Panic. */
    adrp    x0, Lppl_bad_call_panic_str@page
    add     x0, x0, Lppl_bad_call_panic_str@pageoff
    b       EXT(panic)

    .text
    .align 2
    .globl EXT(ppl_dispatch)
LEXT(ppl_dispatch)
    /*
     * Save a couple of important registers (implementation detail; x12 has
     * the PPL per-CPU data address; x13 is not actually interesting).
     */
    stp     x12, x13, [sp, #-0x10]!

    /* Restore the original AIF state. */
    msr     DAIF, x20

    /*
     * Note that if the method is NULL, we'll blow up with a prefetch abort,
     * but the exception vectors will deal with this properly.
     */

    /* Invoke the PPL method. */
#ifdef HAS_APPLE_PAC
    blraa   x10, x9
#else
    blr     x10
#endif

    /* Disable AIF. */
    msr     DAIFSet, #(DAIFSC_STANDARD_DISABLE)

    /* Restore those important registers. */
    ldp     x12, x13, [sp], #0x10

    /* Mark this as a regular return, and hand off to the return path. */
    b       Lppl_dispatch_exit

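/*
 * Exit-code packing used by ppl_exit, in hypothetical C (illustration only):
 *
 *   x15 = exit_reason | (original_daif << 32);   // add  x15, x15, x10, lsl #32
 *   ...
 *   original_daif = (x15 >> 32) & 0xffffffff;    // ubfx x10, x15, #32, #32
 *   x15 &= 0xffffffff;                           // bfc  x15, #32, #32
 *
 * Packing the caller's DAIF into the top half of the exit-reason register
 * avoids burning another callee-saved register across the PPL return path.
 */
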
    .text
    .align 2
    .globl EXT(ppl_bootstrap_dispatch)
LEXT(ppl_bootstrap_dispatch)
    /* Verify the PPL request. */
    cmp     x15, PMAP_COUNT
    b.hs    Lppl_fail_bootstrap_dispatch

    /* Get the requested PPL routine. */
    adrp    x9, EXT(ppl_handler_table)@page
    add     x9, x9, EXT(ppl_handler_table)@pageoff
    add     x9, x9, x15, lsl #3
    ldr     x10, [x9]

    /* Invoke the requested PPL routine. */
#ifdef HAS_APPLE_PAC
    blraa   x10, x9
#else
    blr     x10
#endif
    LOAD_PMAP_CPU_DATA x9, x10, x11

    /* Clear the in-flight pmap pointer */
    add     x9, x9, PMAP_CPU_DATA_INFLIGHT_PMAP
    stlr    xzr, [x9]

    /* Stash off the return value */
    mov     x20, x0
    /* Drop the preemption count */
    bl      EXT(_enable_preemption)
    mov     x0, x20

    /* Pop the stack frame. */
    ldp     x29, x30, [sp, #0x10]
    ldp     x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)
    retab
#else
    ret
#endif

Lppl_fail_bootstrap_dispatch:
    /* Pop our stack frame and panic. */
    ldp     x29, x30, [sp, #0x10]
    ldp     x20, x21, [sp], #0x20
#if __has_feature(ptrauth_returns)
    autibsp
#endif
    adrp    x0, Lppl_bad_call_panic_str@page
    add     x0, x0, Lppl_bad_call_panic_str@pageoff
    b       EXT(panic)

    .text
    .align 2
    .globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
    mrs     x10, DAIF
    msr     DAIFSet, #(DAIFSC_STANDARD_DISABLE)

    adrp    x12, EXT(pmap_ppl_locked_down)@page
    ldr     w12, [x12, #EXT(pmap_ppl_locked_down)@pageoff]
    cbz     w12, Lnot_in_ppl_dispatch

    LOAD_PMAP_CPU_DATA x11, x12, x13

    ldr     w12, [x11, PMAP_CPU_DATA_PPL_STATE]
    cmp     w12, #PPL_STATE_DISPATCH
    b.ne    Lnot_in_ppl_dispatch

    /* Indicate (for the PPL->kernel transition) that we are panicking. */
    mov     x15, #PPL_EXIT_PANIC_CALL

    /* Restore the old stack pointer as we can't push onto PPL stack after we exit PPL */
    ldr     x12, [x11, PMAP_CPU_DATA_KERN_SAVED_SP]
    mov     sp, x12

    mrs     x10, DAIF
    mov     w13, #PPL_STATE_PANIC
    str     w13, [x11, PMAP_CPU_DATA_PPL_STATE]

    /* Now we are ready to exit the PPL. */
    b       ppl_return_to_kernel_mode
Lnot_in_ppl_dispatch:
    msr     DAIF, x10
    ret

    .data
Lppl_bad_call_panic_str:
    .asciz "ppl_dispatch: failed due to bad arguments/state"
#else /* XNU_MONITOR */
    .text
    .align 2
    .globl EXT(ml_panic_trap_to_debugger)
LEXT(ml_panic_trap_to_debugger)
    ret
#endif /* XNU_MONITOR */

/* ARM64_TODO Is globals_asm.h needed? */
//#include "globals_asm.h"

/* vim: set ts=4: */