/*
 * Copyright (c) 2007-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include "assym.s"
#include <arm64/asm.h>
#include <arm64/proc_reg.h>
#include <arm64/machine_machdep.h>
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include <arm64/tunables/tunables.s>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */


.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x1, lr
    bl      EXT(pinst_set_vbar)
    mov     lr, x1
#else
    msr     VBAR_EL1, x0
#endif
.endmacro

.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x0, x1
    mov     x1, lr
    bl      EXT(pinst_set_tcr)
    mov     lr, x1
#else
    msr     TCR_EL1, x1
#endif
.endmacro

.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x1, lr
    bl      EXT(pinst_set_ttbr1)
    mov     lr, x1
#else
    msr     TTBR1_EL1, x0
#endif
.endmacro

.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
    mov     x1, lr

    // This may abort, do so on SP1
    bl      EXT(pinst_spsel_1)

    bl      EXT(pinst_set_sctlr)
    msr     SPSel, #0                           // Back to SP0
    mov     lr, x1
#else
    msr     SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro
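
/*
 * The MSR_*_EL1_Xn macros above stand in for a single msr instruction: on
 * KTRR-enforced configurations these registers are pinned, so the write is
 * routed through the pinst_set_ helpers (value in x0; the return address is
 * stashed in x1 around the bl, since no stack exists yet).  Illustrative
 * use, matching the call sites later in this file:
 *
 *     adrp    x0, EXT(LowExceptionVectorBase)@page
 *     add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
 *     MSR_VBAR_EL1_X0     // msr VBAR_EL1, x0, or pinst_set_vbar under KTRR
 */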
/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *   x19 - Reset handler data pointer
 *   x20 - Boot args pointer
 *   x21 - CPU data pointer
 */
    .text
    .align 12
    .globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
    /*
     * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
     * so on reset the CPU will jump to offset 0x0 and on exceptions
     * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
     * In order for both the reset vector and exception vectors to
     * coexist in the same space, the reset code is moved to the end
     * of the exception vector area.
     */
    b       EXT(reset_vector)

    /* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
    .align 9
    b       .
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .

    .align 7
    .globl EXT(reset_vector)
LEXT(reset_vector)
    // Preserve x0 for start_first_cpu, if called
    // Unlock the core for debugging
    msr     OSLAR_EL1, xzr
    msr     DAIFSet, #(DAIFSC_ALL)              // Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
    // Set low reset vector before attempting any loads
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
    msr     VBAR_EL1, x0
#endif

    // Process reset handlers
    adrp    x19, EXT(ResetHandlerData)@page     // Get address of the reset handler data
    add     x19, x19, EXT(ResetHandlerData)@pageoff
    mrs     x15, MPIDR_EL1                      // Load MPIDR to get CPU number
#if HAS_CLUSTER
    and     x0, x15, #0xFFFF                    // CPU number in Affinity0, cluster ID in Affinity1
#else
    and     x0, x15, #0xFF                      // CPU number is in MPIDR Affinity Level 0
#endif
    ldr     x1, [x19, CPU_DATA_ENTRIES]         // Load start of data entries
    add     x3, x1, MAX_CPUS * 16               // end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
    ldr     x21, [x1, CPU_DATA_PADDR]           // Load physical CPU data address
    cbz     x21, Lnext_cpu_data_entry
    ldr     w2, [x21, CPU_PHYS_ID]              // Load cpu phys id
    cmp     x0, x2                              // Compare cpu data phys cpu and MPIDR_EL1 phys cpu
    b.eq    Lfound_cpu_data_entry               // Branch if match
Lnext_cpu_data_entry:
    add     x1, x1, #16                         // Increment to the next cpu data entry
    cmp     x1, x3
    b.eq    Lskip_cpu_reset_handler             // Not found
    b       Lcheck_cpu_data_entry               // loop
Lfound_cpu_data_entry:

#ifdef APPLEEVEREST
    /*
     * On H15, we need to configure PIO-only tunables and to apply
     * PIO lockdown as early as possible.
     */
    SET_PIO_ONLY_REGISTERS x21, x2, x3, x4, x5, x6
#endif /* APPLEEVEREST */

    adrp    x20, EXT(const_boot_args)@page
    add     x20, x20, EXT(const_boot_args)@pageoff
    ldr     x0, [x21, CPU_RESET_HANDLER]        // Call CPU reset handler
    cbz     x0, Lskip_cpu_reset_handler

    // Validate that our handler is one of the two expected handlers
    adrp    x2, EXT(resume_idle_cpu)@page
    add     x2, x2, EXT(resume_idle_cpu)@pageoff
    cmp     x0, x2
    beq     1f
    adrp    x2, EXT(start_cpu)@page
    add     x2, x2, EXT(start_cpu)@pageoff
    cmp     x0, x2
    bne     Lskip_cpu_reset_handler
1:

#if HAS_BP_RET
    bl      EXT(set_bp_ret)
#endif

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
    /*
     * Populate TPIDR_EL1 (in case the CPU takes an exception while
     * turning on the MMU).
     */
    ldr     x13, [x21, CPU_ACTIVE_THREAD]
    msr     TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

    blr     x0
Lskip_cpu_reset_handler:
    b       .                                   // Hang if the handler is NULL or returns

    .align 3
    .global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
    .global EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
    .space  (stSize_NUM),0
#endif

    .section __DATA_CONST,__const
    .align  3
    .globl  EXT(ResetHandlerData)
LEXT(ResetHandlerData)
    .space  (rhdSize_NUM),0                     // (filled with 0s)
    .text
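
/*
 * Rough C equivalent of the reset_vector lookup above (illustrative only;
 * structure and field names are hypothetical, implied by the assym.s
 * offsets CPU_DATA_ENTRIES, CPU_DATA_PADDR and CPU_PHYS_ID and by the
 * 16-byte entry stride):
 *
 *     struct cpu_data_entry { uint64_t vaddr; uint64_t paddr; };
 *
 *     uint64_t phys_id = mpidr_el1 & (HAS_CLUSTER ? 0xFFFF : 0xFF);
 *     struct cpu_data_entry *entry = reset_handler_data.cpu_data_entries;
 *     cpu_data_t *data = NULL;
 *     for (int i = 0; i < MAX_CPUS; i++, entry++) {
 *         cpu_data_t *candidate = (cpu_data_t *)entry->paddr;
 *         if (candidate != NULL && candidate->cpu_phys_id == phys_id) {
 *             data = candidate;
 *             break;
 *         }
 *     }
 *     if (data == NULL || data->cpu_reset_handler == 0 ||
 *         !is_expected_handler(data->cpu_reset_handler)) {
 *         for (;;) ;                                // Lskip_cpu_reset_handler
 *     }
 *     ((void (*)(void))data->cpu_reset_handler)();  // blr x0
 */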
/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point.  Reset vector = (__start & ~0xfff)
 */
    .align 3
    .globl EXT(_start)
LEXT(_start)
    b       EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 */
    .align 12, 0
    .global EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
    /* EL1 SP 0 */
    b       .
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .
    /* EL1 SP1 */
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .
    /* EL0 64 */
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .
    /* EL0 32 */
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .
    .align 7
    b       .
    .align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
.align ARM_PGSHIFT
.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
    .align 2
    .globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
    adrp    lr, EXT(arm_init_idle_cpu)@page
    add     lr, lr, EXT(arm_init_idle_cpu)@pageoff
    b       start_cpu

    .align 2
    .globl EXT(start_cpu)
LEXT(start_cpu)
    adrp    lr, EXT(arm_init_cpu)@page
    add     lr, lr, EXT(arm_init_cpu)@pageoff
    b       start_cpu

    .align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    // This is done right away in reset vector for pre-KTRR devices
    // Set low reset vector now that we are in the KTRR-free zone
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
    MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

    // x20 set to BootArgs phys address
    // x21 set to cpu data phys address

    // Get the kernel memory parameters from the boot args
    ldr     x22, [x20, BA_VIRT_BASE]            // Get the kernel virt base
    ldr     x23, [x20, BA_PHYS_BASE]            // Get the kernel phys base
    ldr     x24, [x20, BA_MEM_SIZE]             // Get the physical memory size
    adrp    x25, EXT(bootstrap_pagetables)@page // Get the start of the page tables
    ldr     x26, [x20, BA_BOOT_FLAGS]           // Get the kernel boot flags

    // Set TPIDR_EL0 with cached CPU info
    ldr     x0, [x21, CPU_TPIDR_EL0]
    msr     TPIDR_EL0, x0

    // Set TPIDRRO_EL0 to 0
    msr     TPIDRRO_EL0, xzr

    // Set the exception stack pointer
    ldr     x0, [x21, CPU_EXCEPSTACK_TOP]

    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    mov     x1, lr
    bl      EXT(pinst_spsel_1)
    mov     lr, x1
#else
    msr     SPSel, #1
#endif
    mov     sp, x0

    // Set the interrupt stack pointer
    ldr     x0, [x21, CPU_INTSTACK_TOP]
    msr     SPSel, #0
    mov     sp, x0

    // Convert lr to KVA
    add     lr, lr, x22
    sub     lr, lr, x23

    b       common_start
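
/*
 * The "Convert lr to KVA" step above is the physical-to-virtual rebase used
 * throughout early boot: with the MMU still off, addresses formed with
 * adrp/adr are physical, so the kernel virtual equivalent is simply
 *
 *     kva = pa + virt_base - phys_base;   // add lr, lr, x22 ; sub lr, lr, x23
 *
 * using the boot-args fields already loaded into x22/x23 (sketch only).
 */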
364 * arg0 - Virtual address 365 * arg1 - L1 table address 366 * arg2 - L2 table address 367 * arg3 - Scratch register 368 * arg4 - Scratch register 369 * arg5 - Scratch register 370 */ 371.macro create_l1_table_entry 372 and $3, $0, #(ARM_TT_L1_INDEX_MASK) 373 lsr $3, $3, #(ARM_TT_L1_SHIFT) // Get index in L1 table for L2 table 374 lsl $3, $3, #(TTE_SHIFT) // Convert index into pointer offset 375 add $3, $1, $3 // Get L1 entry pointer 376 mov $4, #(ARM_TTE_BOOT_TABLE) // Get L1 table entry template 377 and $5, $2, #(ARM_TTE_TABLE_MASK) // Get address bits of L2 table 378 orr $5, $4, $5 // Create table entry for L2 table 379 str $5, [$3] // Write entry to L1 table 380.endmacro 381 382/* 383 * create_l2_block_entries 384 * 385 * Given base virtual and physical addresses, creates consecutive block entries 386 * in an L2 translation table. 387 * arg0 - Virtual address 388 * arg1 - Physical address 389 * arg2 - L2 table address 390 * arg3 - Number of entries 391 * arg4 - Scratch register 392 * arg5 - Scratch register 393 * arg6 - Scratch register 394 * arg7 - Scratch register 395 */ 396.macro create_l2_block_entries 397 and $4, $0, #(ARM_TT_L2_INDEX_MASK) 398 lsr $4, $4, #(ARM_TTE_BLOCK_L2_SHIFT) // Get index in L2 table for block entry 399 lsl $4, $4, #(TTE_SHIFT) // Convert index into pointer offset 400 add $4, $2, $4 // Get L2 entry pointer 401 mov $5, #(ARM_TTE_BOOT_BLOCK) // Get L2 block entry template 402 and $6, $1, #(ARM_TTE_BLOCK_L2_MASK) // Get address bits of block mapping 403 orr $6, $5, $6 404 mov $5, $3 405 mov $7, #(ARM_TT_L2_SIZE) 4061: 407 str $6, [$4], #(1 << TTE_SHIFT) // Write entry to L2 table and advance 408 add $6, $6, $7 // Increment the output address 409 subs $5, $5, #1 // Decrement the number of entries 410 b.ne 1b 411.endmacro 412 413/* 414 * arg0 - virtual start address 415 * arg1 - physical start address 416 * arg2 - number of entries to map 417 * arg3 - L1 table address 418 * arg4 - free space pointer 419 * arg5 - scratch (entries mapped per loop) 420 * arg6 - scratch 421 * arg7 - scratch 422 * arg8 - scratch 423 * arg9 - scratch 424 */ 425.macro create_bootstrap_mapping 426 /* calculate entries left in this page */ 427 and $5, $0, #(ARM_TT_L2_INDEX_MASK) 428 lsr $5, $5, #(ARM_TT_L2_SHIFT) 429 mov $6, #(TTE_PGENTRIES) 430 sub $5, $6, $5 431 432 /* allocate an L2 table */ 4333: add $4, $4, PGBYTES 434 435 /* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */ 436 create_l1_table_entry $0, $3, $4, $6, $7, $8 437 438 /* determine how many entries to map this loop - the smaller of entries 439 * remaining in page and total entries left */ 440 cmp $2, $5 441 csel $5, $2, $5, lt 442 443 /* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3) */ 444 create_l2_block_entries $0, $1, $4, $5, $6, $7, $8, $9 445 446 /* subtract entries just mapped and bail out if we're done */ 447 subs $2, $2, $5 448 beq 2f 449 450 /* entries left to map - advance base pointers */ 451 add $0, $0, $5, lsl #(ARM_TT_L2_SHIFT) 452 add $1, $1, $5, lsl #(ARM_TT_L2_SHIFT) 453 454 mov $5, #(TTE_PGENTRIES) /* subsequent loops map (up to) a whole L2 page */ 455 b 3b 4562: 457.endmacro 458 459/* 460 * _start_first_cpu 461 * Cold boot init routine. 
/*
 * _start_first_cpu
 * Cold boot init routine.  Called from __start
 *   x0 - Boot args
 */
    .align 2
    .globl EXT(start_first_cpu)
LEXT(start_first_cpu)

    // Unlock the core for debugging
    msr     OSLAR_EL1, xzr
    msr     DAIFSet, #(DAIFSC_ALL)              // Disable all interrupts

    mov     x20, x0
    mov     x21, #0

    // Set low reset vector before attempting any loads
    adrp    x0, EXT(LowExceptionVectorBase)@page
    add     x0, x0, EXT(LowExceptionVectorBase)@pageoff
    MSR_VBAR_EL1_X0

    // Get the kernel memory parameters from the boot args
    ldr     x22, [x20, BA_VIRT_BASE]            // Get the kernel virt base
    ldr     x23, [x20, BA_PHYS_BASE]            // Get the kernel phys base
    ldr     x24, [x20, BA_MEM_SIZE]             // Get the physical memory size
    adrp    x25, EXT(bootstrap_pagetables)@page // Get the start of the page tables
    ldr     x26, [x20, BA_BOOT_FLAGS]           // Get the kernel boot flags

    // Clear the registers that will be used to store the userspace thread pointer and CPU number.
    // We may not actually be booting from ordinal CPU 0, so this register will be updated
    // in ml_parse_cpu_topology(), which happens later in bootstrap.
    msr     TPIDRRO_EL0, xzr
    msr     TPIDR_EL0, xzr

    // Set up exception stack pointer
    adrp    x0, EXT(excepstack_top)@page        // Load top of exception stack
    add     x0, x0, EXT(excepstack_top)@pageoff
    add     x0, x0, x22                         // Convert to KVA
    sub     x0, x0, x23

    // Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
    bl      EXT(pinst_spsel_1)
#else
    msr     SPSel, #1
#endif

    mov     sp, x0

    // Set up interrupt stack pointer
    adrp    x0, EXT(intstack_top)@page          // Load top of irq stack
    add     x0, x0, EXT(intstack_top)@pageoff
    add     x0, x0, x22                         // Convert to KVA
    sub     x0, x0, x23
    msr     SPSel, #0                           // Set SP_EL0 to interrupt stack
    mov     sp, x0

    // Load address to the C init routine into link register
    adrp    lr, EXT(arm_init)@page
    add     lr, lr, EXT(arm_init)@pageoff
    add     lr, lr, x22                         // Convert to KVA
    sub     lr, lr, x23

    /*
     * Set up the bootstrap page tables with a single block entry for the V=P
     * mapping, a single block entry for the trampolined kernel address (KVA),
     * and all else invalid. This requires four pages:
     *   Page 1 - V=P L1 table
     *   Page 2 - V=P L2 table
     *   Page 3 - KVA L1 table
     *   Page 4 - KVA L2 table
     */

    // Invalidate all entries in the bootstrap page tables
    mov     x0, #(ARM_TTE_EMPTY)                // Load invalid entry template
    mov     x1, x25                             // Start at V=P pagetable root
    mov     x2, #(TTE_PGENTRIES)                // Load number of entries per page
    lsl     x2, x2, #2                          // Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:                          // do {
    str     x0, [x1], #(1 << TTE_SHIFT)         //   Invalidate and advance
    subs    x2, x2, #1                          //   entries--
    b.ne    Linvalidate_bootstrap               // } while (entries != 0)
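
/*
 * C equivalent of the loop above (sketch): every entry in the four
 * bootstrap table pages is reset to the invalid template before any
 * mapping is installed.
 *
 *     uint64_t *tte = bootstrap_pagetables;
 *     for (uint64_t i = 0; i < 4 * TTE_PGENTRIES; i++) {
 *         *tte++ = ARM_TTE_EMPTY;
 *     }
 */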
555 * 556 * Given that iBoot guarantees that the unslid kernelcache base address 557 * will begin on an L2 boundary, this should prevent us from accidentally 558 * mapping TZ0. 559 */ 560 adrp x0, EXT(_mh_execute_header)@page // address of kernel mach header 561 add x0, x0, EXT(_mh_execute_header)@pageoff 562 ldr w1, [x0, #0x18] // load mach_header->flags 563 tbz w1, #0x1f, Lkernelcache_base_found // if MH_DYLIB_IN_CACHE unset, base is kernel mach header 564 ldr w1, [x0, #0x20] // load first segment cmd (offset sizeof(kernel_mach_header_t)) 565 cmp w1, #0x19 // must be LC_SEGMENT_64 566 bne . 567 ldr x1, [x0, #0x38] // load first segment vmaddr 568 sub x1, x0, x1 // compute slide 569 MOV64 x0, VM_KERNEL_LINK_ADDRESS 570 add x0, x0, x1 // base is kernel link address + slide 571 572Lkernelcache_base_found: 573 /* 574 * Adjust physical and virtual base addresses to account for physical 575 * memory preceeding xnu Mach-O header 576 * x22 - Kernel virtual base 577 * x23 - Kernel physical base 578 * x24 - Physical memory size 579 */ 580 sub x18, x0, x23 581 sub x24, x24, x18 582 add x22, x22, x18 583 add x23, x23, x18 584 585 /* 586 * x0 - V=P virtual cursor 587 * x4 - V=P physical cursor 588 * x14 - KVA virtual cursor 589 * x15 - KVA physical cursor 590 */ 591 mov x4, x0 592 mov x14, x22 593 mov x15, x23 594 595 /* 596 * Allocate L1 tables 597 * x1 - V=P L1 page 598 * x3 - KVA L1 page 599 * x2 - free mem pointer from which we allocate a variable number of L2 600 * pages. The maximum number of bootstrap page table pages is limited to 601 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case 602 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so 603 * 8 total pages for V=P and KVA. 604 */ 605 mov x1, x25 606 add x3, x1, PGBYTES 607 mov x2, x3 608 609 /* 610 * Setup the V=P bootstrap mapping 611 * x5 - total number of L2 entries to allocate 612 */ 613 lsr x5, x24, #(ARM_TT_L2_SHIFT) 614 /* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */ 615 create_bootstrap_mapping x0, x4, x5, x1, x2, x6, x10, x11, x12, x13 616 617 /* Setup the KVA bootstrap mapping */ 618 lsr x5, x24, #(ARM_TT_L2_SHIFT) 619 create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13 620 621 /* Ensure TTEs are visible */ 622 dsb ish 623 624 625 b common_start 626 627/* 628 * Begin common CPU initialization 629 * 630 * Regster state: 631 * x20 - PA of boot args 632 * x21 - zero on cold boot, PA of cpu data on warm reset 633 * x22 - Kernel virtual base 634 * x23 - Kernel physical base 635 * x25 - PA of the V=P pagetable root 636 * lr - KVA of C init routine 637 * sp - SP_EL0 selected 638 * 639 * SP_EL0 - KVA of CPU's interrupt stack 640 * SP_EL1 - KVA of CPU's exception stack 641 * TPIDRRO_EL0 - CPU number 642 */ 643common_start: 644 645#if HAS_NEX_PG 646 mov x19, lr 647 bl EXT(set_nex_pg) 648 mov lr, x19 649#endif 650 651 // Set the translation control register. 652 adrp x0, EXT(sysreg_restore)@page // Load TCR value from the system register restore structure 653 add x0, x0, EXT(sysreg_restore)@pageoff 654 ldr x1, [x0, SR_RESTORE_TCR_EL1] 655 MSR_TCR_EL1_X1 656 657 /* Set up translation table base registers. 658 * TTBR0 - V=P table @ top of kernel 659 * TTBR1 - KVA table @ top of kernel + 1 page 660 */ 661#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) 662 /* Note that for KTRR configurations, the V=P map will be modified by 663 * arm_vm_init.c. 
664 */ 665#endif 666 and x0, x25, #(TTBR_BADDR_MASK) 667 mov x19, lr 668 bl EXT(set_mmu_ttb) 669 mov lr, x19 670 add x0, x25, PGBYTES 671 and x0, x0, #(TTBR_BADDR_MASK) 672 MSR_TTBR1_EL1_X0 673 674 // Set up MAIR attr0 for normal memory, attr1 for device memory 675 mov x0, xzr 676 mov x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK)) 677 orr x0, x0, x1 678 mov x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU)) 679 orr x0, x0, x1 680 mov x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB)) 681 orr x0, x0, x1 682 mov x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_RESERVED)) 683 orr x0, x0, x1 684 mov x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED)) 685 orr x0, x0, x1 686 mov x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE)) 687 orr x0, x0, x1 688#if HAS_FEAT_XS 689 mov x1, #(MAIR_DISABLE_XS << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE_XS)) 690 orr x0, x0, x1 691 mov x1, #(MAIR_POSTED_COMBINED_REORDERED_XS << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED_XS)) 692 orr x0, x0, x1 693#else 694 mov x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED)) 695 orr x0, x0, x1 696 mov x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED)) 697 orr x0, x0, x1 698#endif /* HAS_FEAT_XS */ 699 msr MAIR_EL1, x0 700 isb 701 tlbi vmalle1 702 dsb ish 703 704 705#ifndef __ARM_IC_NOALIAS_ICACHE__ 706 /* Invalidate the TLB and icache on systems that do not guarantee that the 707 * caches are invalidated on reset. 708 */ 709 tlbi vmalle1 710 ic iallu 711#endif 712 713 /* If x21 is not 0, then this is either the start_cpu path or 714 * the resume_idle_cpu path. cpu_ttep should already be 715 * populated, so just switch to the kernel_pmap now. 716 */ 717 718 cbz x21, 1f 719 adrp x0, EXT(cpu_ttep)@page 720 add x0, x0, EXT(cpu_ttep)@pageoff 721 ldr x0, [x0] 722 MSR_TTBR1_EL1_X0 7231: 724 725 // Set up the exception vectors 726#if __ARM_KERNEL_PROTECT__ 727 /* If this is not the first reset of the boot CPU, the alternate mapping 728 * for the exception vectors will be set up, so use it. Otherwise, we 729 * should use the mapping located in the kernelcache mapping. 730 */ 731 MOV64 x0, ARM_KERNEL_PROTECT_EXCEPTION_START 732 733 cbnz x21, 1f 734#endif /* __ARM_KERNEL_PROTECT__ */ 735 adrp x0, EXT(ExceptionVectorsBase)@page // Load exception vectors base address 736 add x0, x0, EXT(ExceptionVectorsBase)@pageoff 737 add x0, x0, x22 // Convert exception vector address to KVA 738 sub x0, x0, x23 7391: 740 MSR_VBAR_EL1_X0 741 742#if HAS_APPLE_PAC 743 PAC_INIT_KEY_STATE tmp=x0, tmp2=x1 744#endif /* HAS_APPLE_PAC */ 745 746 // Enable caches, MMU, ROP and JOP 747 MOV64 x0, SCTLR_EL1_DEFAULT 748 MSR_SCTLR_EL1_X0 749 isb sy 750 751#if !VMAPPLE 752 MOV64 x1, SCTLR_EL1_DEFAULT 753 cmp x0, x1 754 bne . 755#endif /* !VMAPPLE */ 756 757#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT))) 758 /* Watchtower 759 * 760 * If we have a Watchtower monitor it will setup CPACR_EL1 for us, touching 761 * it here would trap to EL3. 
762 */ 763 764 // Enable NEON 765 mov x0, #(CPACR_FPEN_ENABLE) 766 msr CPACR_EL1, x0 767#endif 768 769 // Clear thread pointer 770 msr TPIDR_EL1, xzr // Set thread register 771 772 773#if defined(APPLE_ARM64_ARCH_FAMILY) 774 mrs x12, MDSCR_EL1 775 orr x12, x12, MDSCR_TDCC 776 msr MDSCR_EL1, x12 777 // Initialization common to all non-virtual Apple targets 778#endif // APPLE_ARM64_ARCH_FAMILY 779 780 // Read MIDR before start of per-SoC tunables 781 mrs x12, MIDR_EL1 782 783 APPLY_TUNABLES x12, x13, x14 784 785#if HAS_CLUSTER && !NO_CPU_OVRD 786 // Unmask external IRQs if we're restarting from non-retention WFI 787 mrs x9, CPU_OVRD 788 and x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask)) 789 msr CPU_OVRD, x9 790#endif 791 792 // If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap. 793 cbnz x21, Ltrampoline 794 795 // Set KVA of boot args as first arg 796 add x0, x20, x22 797 sub x0, x0, x23 798 799#if KASAN 800 mov x20, x0 801 mov x21, lr 802 803 // x0: boot args 804 // x1: KVA page table phys base 805 mrs x1, TTBR1_EL1 806 bl EXT(kasan_bootstrap) 807 808 mov x0, x20 809 mov lr, x21 810#endif 811 812 // Return to arm_init() 813 ret 814 815Ltrampoline: 816 // Load VA of the trampoline 817 adrp x0, arm_init_tramp@page 818 add x0, x0, arm_init_tramp@pageoff 819 add x0, x0, x22 820 sub x0, x0, x23 821 822 // Branch to the trampoline 823 br x0 824 825/* 826 * V=P to KVA trampoline. 827 * x0 - KVA of cpu data pointer 828 */ 829 .text 830 .align 2 831arm_init_tramp: 832 ARM64_JUMP_TARGET 833 /* On a warm boot, the full kernel translation table is initialized in 834 * addition to the bootstrap tables. The layout is as follows: 835 * 836 * +--Top of Memory--+ 837 * ... 838 * | | 839 * | Primary Kernel | 840 * | Trans. Table | 841 * | | 842 * +--Top + 5 pages--+ 843 * | | 844 * | Invalid Table | 845 * | | 846 * +--Top + 4 pages--+ 847 * | | 848 * | KVA Table | 849 * | | 850 * +--Top + 2 pages--+ 851 * | | 852 * | V=P Table | 853 * | | 854 * +--Top of Kernel--+ 855 * | | 856 * | Kernel Mach-O | 857 * | | 858 * ... 859 * +---Kernel Base---+ 860 */ 861 862 863 mov x19, lr 864 // Convert CPU data PA to VA and set as first argument 865 mov x0, x21 866 bl EXT(phystokv) 867 868 mov lr, x19 869 870 /* Return to arm_init() */ 871 ret 872 873//#include "globals_asm.h" 874 875/* vim: set ts=4: */ 876