1/* 2 * Copyright (c) 2007-2022 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * arm64 low-level CPU bootstrap: reset vectors, early exception vectors,
 * bootstrap page-table construction, and the common MMU/system-register
 * bring-up path shared by cold boot and warm (sleep/idle) resets.
 */
#include <arm64/proc_reg.h>
#include <arm64/asm.h>
#include <arm64/machine_machdep.h>
#include <arm64/proc_reg.h>	// NOTE(review): duplicate of the first include above; harmless (header guarded) but could be dropped
#include <pexpert/arm64/board_config.h>
#include <mach_assert.h>
#include <machine/asm.h>
#include "assym.s"
#include <arm64/tunables/tunables.s>
#include <arm64/exception_asm.h>

#if __ARM_KERNEL_PROTECT__
#include <arm/pmap.h>
#endif /* __ARM_KERNEL_PROTECT__ */



/*
 * Write x0 to VBAR_EL1.
 * On KTRR-protected devices the write must go through the pinned-instruction
 * trampoline; that path clobbers x1 (used to preserve lr across the call).
 */
.macro MSR_VBAR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_vbar)
	mov	lr, x1
#else
	msr	VBAR_EL1, x0
#endif
.endmacro

/*
 * Write x1 to TCR_EL1.
 * KTRR path clobbers x0 (pinst_set_tcr takes its argument in x0) and uses
 * x1 to preserve lr.
 */
.macro MSR_TCR_EL1_X1
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x0, x1
	mov	x1, lr
	bl	EXT(pinst_set_tcr)
	mov	lr, x1
#else
	msr	TCR_EL1, x1
#endif
.endmacro

/*
 * Write x0 to TTBR1_EL1.
 * KTRR path clobbers x1 (preserves lr across the pinst call).
 */
.macro MSR_TTBR1_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr
	bl	EXT(pinst_set_ttbr1)
	mov	lr, x1
#else
	msr	TTBR1_EL1, x0
#endif
.endmacro

/*
 * Write x0 to SCTLR_EL1 (turns the MMU/caches on when x0 enables them).
 * KTRR path clobbers x1 and temporarily switches to SP1 because the pinned
 * write may abort; it returns to SP0 afterwards.
 */
.macro MSR_SCTLR_EL1_X0
#if defined(KERNEL_INTEGRITY_KTRR)
	mov	x1, lr

	// This may abort, do so on SP1
	bl	EXT(pinst_spsel_1)

	bl	EXT(pinst_set_sctlr)
	msr	SPSel, #0		// Back to SP0
	mov	lr, x1
#else
	msr	SCTLR_EL1, x0
#endif /* defined(KERNEL_INTEGRITY_KTRR) */
.endmacro

/*
 * Checks the reset handler for global and CPU-specific reset-assist functions,
 * then jumps to the reset handler with boot args and cpu data. This is copied
 * to the first physical page during CPU bootstrap (see cpu.c).
 *
 * Variables:
 *	x19 - Reset handler data pointer
 *	x20 - Boot args pointer
 *	x21 - CPU data pointer
 */
	.text
	.align 12
	.globl EXT(LowResetVectorBase)
LEXT(LowResetVectorBase)
	/*
	 * On reset, both RVBAR_EL1 and VBAR_EL1 point here. SPSel.SP is 1,
	 * so on reset the CPU will jump to offset 0x0 and on exceptions
	 * the CPU will jump to offset 0x200, 0x280, 0x300, or 0x380.
	 * In order for both the reset vector and exception vectors to
	 * coexist in the same space, the reset code is moved to the end
	 * of the exception vector area.
	 */
	b	EXT(reset_vector)

	/* EL1 SP1: These vectors trap errors during early startup on non-boot CPUs. */
	.align	9		// 0x200: synchronous exception vector (spin)
	b	.
	.align	7		// 0x280: IRQ vector (spin)
	b	.
	.align	7		// 0x300: FIQ vector (spin)
	b	.
	.align	7		// 0x380: SError vector (spin)
	b	.

	.align	7
	.globl EXT(reset_vector)
LEXT(reset_vector)
	// Preserve x0 for start_first_cpu, if called
	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)			// Disable all interrupts

#if !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR))
	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	msr	VBAR_EL1, x0
#endif



	// Process reset handlers: walk the cpu data entry table looking for
	// the entry whose physical ID matches this CPU's MPIDR_EL1 affinity.
	adrp	x19, EXT(ResetHandlerData)@page		// Get address of the reset handler data
	add	x19, x19, EXT(ResetHandlerData)@pageoff
	mrs	x15, MPIDR_EL1				// Load MPIDR to get CPU number
#if HAS_CLUSTER
	and	x0, x15, #0xFFFF			// CPU number in Affinity0, cluster ID in Affinity1
#else
	and	x0, x15, #0xFF				// CPU number is in MPIDR Affinity Level 0
#endif
	ldr	x1, [x19, CPU_DATA_ENTRIES]		// Load start of data entries
	add	x3, x1, MAX_CPUS * 16			// end addr of data entries = start + (16 * MAX_CPUS)
Lcheck_cpu_data_entry:
	ldr	x21, [x19, CPU_DATA_PADDR]		// NOTE: see below — x21 ends up holding this CPU's data PA on match
	ldr	x21, [x1, CPU_DATA_PADDR]		// Load physical CPU data address
	cbz	x21, Lnext_cpu_data_entry
	ldr	w2, [x21, CPU_PHYS_ID]			// Load ccc cpu phys id
	cmp	x0, x2					// Compare cpu data phys cpu and MPIDR_EL1 phys cpu
	b.eq	Lfound_cpu_data_entry			// Branch if match
Lnext_cpu_data_entry:
	add	x1, x1, #16				// Increment to the next cpu data entry
	cmp	x1, x3
	b.eq	Lskip_cpu_reset_handler			// Not found
	b	Lcheck_cpu_data_entry			// loop
Lfound_cpu_data_entry:
	adrp	x20, EXT(const_boot_args)@page
	add	x20, x20, EXT(const_boot_args)@pageoff
	ldr	x0, [x21, CPU_RESET_HANDLER]		// Call CPU reset handler
	cbz	x0, Lskip_cpu_reset_handler

	// Validate that our handler is one of the two expected handlers
	// (defense against a corrupted/forged handler pointer).
	adrp	x2, EXT(resume_idle_cpu)@page
	add	x2, x2, EXT(resume_idle_cpu)@pageoff
	cmp	x0, x2
	beq	1f
	adrp	x2, EXT(start_cpu)@page
	add	x2, x2, EXT(start_cpu)@pageoff
	cmp	x0, x2
	bne	Lskip_cpu_reset_handler
1:

#if HAS_BP_RET
	bl	EXT(set_bp_ret)
#endif

#if __ARM_KERNEL_PROTECT__ && defined(KERNEL_INTEGRITY_KTRR)
	/*
	 * Populate TPIDR_EL1 (in case the CPU takes an exception while
	 * turning on the MMU).
	 */
	ldr	x13, [x21, CPU_ACTIVE_THREAD]
	msr	TPIDR_EL1, x13
#endif /* __ARM_KERNEL_PROTECT__ */

	blr	x0
Lskip_cpu_reset_handler:
	b	.					// Hang if the handler is NULL or returns

	.align 3
	.global EXT(LowResetVectorEnd)
LEXT(LowResetVectorEnd)
	.global	EXT(SleepToken)
#if WITH_CLASSIC_S2R
LEXT(SleepToken)
	.space	(stSize_NUM),0
#endif

	.section __DATA_CONST,__const
	.align	3
	.globl	EXT(ResetHandlerData)
LEXT(ResetHandlerData)
	.space	(rhdSize_NUM),0		// (filled with 0s)
	.text


/*
 * __start trampoline is located at a position relative to LowResetVectorBase
 * so that iBoot can compute the reset vector position to set IORVBAR using
 * only the kernel entry point.  Reset vector = (__start & ~0xfff)
 */
	.align 3
	.globl EXT(_start)
LEXT(_start)
	b	EXT(start_first_cpu)


/*
 * Provides an early-boot exception vector so that the processor will spin
 * and preserve exception information (e.g., ELR_EL1) when early CPU bootstrap
 * code triggers an exception. This is copied to the second physical page
 * during CPU bootstrap (see cpu.c).
 *
 * All 16 architectural vector slots (4 exception types x 4 source classes)
 * spin in place.
 */
	.align 12, 0
	.global	EXT(LowExceptionVectorBase)
LEXT(LowExceptionVectorBase)
	/* EL1 SP 0 */
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	/* EL1 SP1 */
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	/* EL0 64 */
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	/* EL0 32 */
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 7
	b	.
	.align 12, 0

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
/*
 * Provide a global symbol so that we can narrow the V=P mapping to cover
 * this page during arm_vm_init.
 */
.align ARM_PGSHIFT
.globl EXT(bootstrap_instructions)
LEXT(bootstrap_instructions)

#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
	// Warm-reset entry used when resuming an idle CPU; tail-calls the shared
	// start_cpu path with lr = PA of arm_init_idle_cpu (converted to KVA there).
	.align 2
	.globl EXT(resume_idle_cpu)
LEXT(resume_idle_cpu)
	adrp	lr, EXT(arm_init_idle_cpu)@page
	add	lr, lr, EXT(arm_init_idle_cpu)@pageoff
	b	start_cpu

	// Warm-reset entry used when starting a secondary CPU; lr = PA of arm_init_cpu.
	.align 2
	.globl EXT(start_cpu)
LEXT(start_cpu)
	adrp	lr, EXT(arm_init_cpu)@page
	add	lr, lr, EXT(arm_init_cpu)@pageoff
	b	start_cpu

	.align 2
start_cpu:
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	// This is done right away in reset vector for pre-KTRR devices
	// Set low reset vector now that we are in the KTRR-free zone
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

	// x20 set to BootArgs phys address
	// x21 set to cpu data phys address

	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]		// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]		// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]			// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr	x26, [x20, BA_BOOT_FLAGS]		// Get the kernel boot flags


	// Set TPIDR_EL0 with the CPU number
	ldrsh	x0, [x21, CPU_NUMBER_GS]
	msr	TPIDR_EL0, x0

	// Set TPIDRRO_EL0 to 0
	msr	TPIDRRO_EL0, xzr


	// Set the exception stack pointer
	ldr	x0, [x21, CPU_EXCEPSTACK_TOP]


	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	mov	x1, lr
	bl	EXT(pinst_spsel_1)
	mov	lr, x1
#else
	msr	SPSel, #1
#endif
	mov	sp, x0

	// Set the interrupt stack pointer
	ldr	x0, [x21, CPU_INTSTACK_TOP]
	msr	SPSel, #0
	mov	sp, x0

	// Convert lr to KVA (lr currently holds the init routine's PA)
	add	lr, lr, x22
	sub	lr, lr, x23

	b	common_start

/*
 * create_l1_table_entry
 *
 * Given a virtual address, creates a table entry in an L1 translation table
 * to point to an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - L1 table address
 *   arg2 - L2 table address
 *   arg3 - Scratch register
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 */
.macro create_l1_table_entry
	and	$3, $0, #(ARM_TT_L1_INDEX_MASK)
	lsr	$3, $3, #(ARM_TT_L1_SHIFT)		// Get index in L1 table for L2 table
	lsl	$3, $3, #(TTE_SHIFT)			// Convert index into pointer offset
	add	$3, $1, $3				// Get L1 entry pointer
	mov	$4, #(ARM_TTE_BOOT_TABLE)		// Get L1 table entry template
	and	$5, $2, #(ARM_TTE_TABLE_MASK)		// Get address bits of L2 table
	orr	$5, $4, $5				// Create table entry for L2 table
	str	$5, [$3]				// Write entry to L1 table
.endmacro

/*
 * create_l2_block_entries
 *
 * Given base virtual and physical addresses, creates consecutive block entries
 * in an L2 translation table.
 *   arg0 - Virtual address
 *   arg1 - Physical address
 *   arg2 - L2 table address
 *   arg3 - Number of entries
 *   arg4 - Scratch register
 *   arg5 - Scratch register
 *   arg6 - Scratch register
 *   arg7 - Scratch register
 * arg3 itself is not modified; the count is copied into arg5 before looping.
 */
.macro create_l2_block_entries
	and	$4, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$4, $4, #(ARM_TTE_BLOCK_L2_SHIFT)	// Get index in L2 table for block entry
	lsl	$4, $4, #(TTE_SHIFT)			// Convert index into pointer offset
	add	$4, $2, $4				// Get L2 entry pointer
	mov	$5, #(ARM_TTE_BOOT_BLOCK)		// Get L2 block entry template
	and	$6, $1, #(ARM_TTE_BLOCK_L2_MASK)	// Get address bits of block mapping
	orr	$6, $5, $6
	mov	$5, $3
	mov	$7, #(ARM_TT_L2_SIZE)
1:
	str	$6, [$4], #(1 << TTE_SHIFT)		// Write entry to L2 table and advance
	add	$6, $6, $7				// Increment the output address
	subs	$5, $5, #1				// Decrement the number of entries
	b.ne	1b
.endmacro

/*
 * create_bootstrap_mapping
 *
 * Maps [arg0, arg0 + arg2 * L2_SIZE) -> [arg1, ...) with L2 block entries,
 * allocating fresh L2 tables from the free-space pointer as needed.
 *   arg0 - virtual start address
 *   arg1 - physical start address
 *   arg2 - number of entries to map
 *   arg3 - L1 table address
 *   arg4 - free space pointer
 *   arg5 - scratch (entries mapped per loop)
 *   arg6 - scratch
 *   arg7 - scratch
 *   arg8 - scratch
 *   arg9 - scratch
 */
.macro create_bootstrap_mapping
	/* calculate entries left in this page */
	and	$5, $0, #(ARM_TT_L2_INDEX_MASK)
	lsr	$5, $5, #(ARM_TT_L2_SHIFT)
	mov	$6, #(TTE_PGENTRIES)
	sub	$5, $6, $5

	/* allocate an L2 table */
3:	add	$4, $4, PGBYTES

	/* create_l1_table_entry(virt_base, L1 table, L2 table, scratch1, scratch2, scratch3) */
	create_l1_table_entry	$0, $3, $4, $6, $7, $8

	/* determine how many entries to map this loop - the smaller of entries
	 * remaining in page and total entries left */
	cmp	$2, $5
	csel	$5, $2, $5, lt

	/* create_l2_block_entries(virt_base, phys_base, L2 table, num_ents, scratch1, scratch2, scratch3) */
	create_l2_block_entries	$0, $1, $4, $5, $6, $7, $8, $9

	/* subtract entries just mapped and bail out if we're done */
	subs	$2, $2, $5
	beq	2f

	/* entries left to map - advance base pointers */
	add	$0, $0, $5, lsl #(ARM_TT_L2_SHIFT)
	add	$1, $1, $5, lsl #(ARM_TT_L2_SHIFT)

	mov	$5, #(TTE_PGENTRIES)	/* subsequent loops map (up to) a whole L2 page */
	b	3b
2:
.endmacro

/*
 * _start_first_cpu
 * Cold boot init routine.  Called from __start
 *   x0 - Boot args
 */
	.align 2
	.globl EXT(start_first_cpu)
LEXT(start_first_cpu)

	// Unlock the core for debugging
	msr	OSLAR_EL1, xzr
	msr	DAIFSet, #(DAIFSC_ALL)			// Disable all interrupts

	mov	x20, x0					// x20 = PA of boot args
	mov	x21, #0					// x21 = 0 marks cold boot for common_start

	// Set low reset vector before attempting any loads
	adrp	x0, EXT(LowExceptionVectorBase)@page
	add	x0, x0, EXT(LowExceptionVectorBase)@pageoff
	MSR_VBAR_EL1_X0


	// Get the kernel memory parameters from the boot args
	ldr	x22, [x20, BA_VIRT_BASE]		// Get the kernel virt base
	ldr	x23, [x20, BA_PHYS_BASE]		// Get the kernel phys base
	ldr	x24, [x20, BA_MEM_SIZE]			// Get the physical memory size
	adrp	x25, EXT(bootstrap_pagetables)@page	// Get the start of the page tables
	ldr	x26, [x20, BA_BOOT_FLAGS]		// Get the kernel boot flags

	// Clear the registers that will be used to store the userspace thread pointer and CPU number.
	// We may not actually be booting from ordinal CPU 0, so this register will be updated
	// in ml_parse_cpu_topology(), which happens later in bootstrap.
	msr	TPIDRRO_EL0, xzr
	msr	TPIDR_EL0, xzr

	// Set up exception stack pointer
	adrp	x0, EXT(excepstack_top)@page		// Load top of exception stack
	add	x0, x0, EXT(excepstack_top)@pageoff
	add	x0, x0, x22				// Convert to KVA
	sub	x0, x0, x23

	// Set SP_EL1 to exception stack
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	bl	EXT(pinst_spsel_1)
#else
	msr	SPSel, #1
#endif

	mov	sp, x0

	// Set up interrupt stack pointer
	adrp	x0, EXT(intstack_top)@page		// Load top of irq stack
	add	x0, x0, EXT(intstack_top)@pageoff
	add	x0, x0, x22				// Convert to KVA
	sub	x0, x0, x23
	msr	SPSel, #0				// Set SP_EL0 to interrupt stack
	mov	sp, x0

	// Load address to the C init routine into link register
	adrp	lr, EXT(arm_init)@page
	add	lr, lr, EXT(arm_init)@pageoff
	add	lr, lr, x22				// Convert to KVA
	sub	lr, lr, x23

	/*
	 * Set up the bootstrap page tables with a single block entry for the V=P
	 * mapping, a single block entry for the trampolined kernel address (KVA),
	 * and all else invalid. This requires four pages:
	 *	Page 1 - V=P L1 table
	 *	Page 2 - V=P L2 table
	 *	Page 3 - KVA L1 table
	 *	Page 4 - KVA L2 table
	 */

	// Invalidate all entries in the bootstrap page tables
	mov	x0, #(ARM_TTE_EMPTY)			// Load invalid entry template
	mov	x1, x25					// Start at V=P pagetable root
	mov	x2, #(TTE_PGENTRIES)			// Load number of entries per page
	lsl	x2, x2, #2				// Shift by 2 for num entries on 4 pages

Linvalidate_bootstrap:					// do {
	str	x0, [x1], #(1 << TTE_SHIFT)		//   Invalidate and advance
	subs	x2, x2, #1				//   entries--
	b.ne	Linvalidate_bootstrap			// } while (entries != 0)

	/*
	 * In order to reclaim memory on targets where TZ0 (or some other entity)
	 * must be located at the base of memory, iBoot may set the virtual and
	 * physical base addresses to immediately follow whatever lies at the
	 * base of physical memory.
	 *
	 * If the base address belongs to TZ0, it may be dangerous for xnu to map
	 * it (as it may be prefetched, despite being technically inaccessible).
	 * In order to avoid this issue while keeping the mapping code simple, we
	 * may continue to use block mappings, but we will only map the kernelcache
	 * mach header to the end of memory.
	 *
	 * Given that iBoot guarantees that the unslid kernelcache base address
	 * will begin on an L2 boundary, this should prevent us from accidentally
	 * mapping TZ0.
	 */
	adrp	x0, EXT(_mh_execute_header)@page	// address of kernel mach header
	add	x0, x0, EXT(_mh_execute_header)@pageoff
	ldr	w1, [x0, #0x18]				// load mach_header->flags
	tbz	w1, #0x1f, Lkernelcache_base_found	// if MH_DYLIB_IN_CACHE unset, base is kernel mach header
	ldr	w1, [x0, #0x20]				// load first segment cmd (offset sizeof(kernel_mach_header_t))
	cmp	w1, #0x19				// must be LC_SEGMENT_64
	bne	.					// spin forever on an unexpected load command
	ldr	x1, [x0, #0x38]				// load first segment vmaddr
	sub	x1, x0, x1				// compute slide
	MOV64	x0, VM_KERNEL_LINK_ADDRESS
	add	x0, x0, x1				// base is kernel link address + slide

Lkernelcache_base_found:
	/*
	 * Adjust physical and virtual base addresses to account for physical
	 * memory preceding xnu Mach-O header
	 *	x22 - Kernel virtual base
	 *	x23 - Kernel physical base
	 *	x24 - Physical memory size
	 */
	sub	x18, x0, x23				// x18 = bytes of memory below the mach header
	sub	x24, x24, x18
	add	x22, x22, x18
	add	x23, x23, x18

	/*
	 * x0  - V=P virtual cursor
	 * x4  - V=P physical cursor
	 * x14 - KVA virtual cursor
	 * x15 - KVA physical cursor
	 */
	mov	x4, x0
	mov	x14, x22
	mov	x15, x23

	/*
	 * Allocate L1 tables
	 * x1 - V=P L1 page
	 * x3 - KVA L1 page
	 * x2 - free mem pointer from which we allocate a variable number of L2
	 * pages. The maximum number of bootstrap page table pages is limited to
	 * BOOTSTRAP_TABLE_SIZE. For a 2G 4k page device, assuming the worst-case
	 * slide, we need 1xL1 and up to 3xL2 pages (1GB mapped per L1 entry), so
	 * 8 total pages for V=P and KVA.
	 */
	mov	x1, x25
	add	x3, x1, PGBYTES
	mov	x2, x3

	/*
	 * Setup the V=P bootstrap mapping
	 * x5 - total number of L2 entries to allocate
	 */
	lsr	x5,  x24, #(ARM_TT_L2_SHIFT)
	/* create_bootstrap_mapping(vbase, pbase, num_ents, L1 table, freeptr) */
	create_bootstrap_mapping x0, x4, x5, x1, x2, x6, x10, x11, x12, x13

	/* Setup the KVA bootstrap mapping */
	lsr	x5,  x24, #(ARM_TT_L2_SHIFT)
	create_bootstrap_mapping x14, x15, x5, x3, x2, x9, x10, x11, x12, x13

	/* Ensure TTEs are visible */
	dsb	ish


	b	common_start

/*
 * Begin common CPU initialization
 *
 * Register state:
 *	x20 - PA of boot args
 *	x21 - zero on cold boot, PA of cpu data on warm reset
 *	x22 - Kernel virtual base
 *	x23 - Kernel physical base
 *	x25 - PA of the V=P pagetable root
 *	lr  - KVA of C init routine
 *	sp  - SP_EL0 selected
 *
 *	SP_EL0 - KVA of CPU's interrupt stack
 *	SP_EL1 - KVA of CPU's exception stack
 *	TPIDRRO_EL0 - CPU number
 */
common_start:

#if HAS_NEX_PG
	mov	x19, lr
	bl	EXT(set_nex_pg)
	mov	lr, x19
#endif

	// Set the translation control register.
	adrp	x0,     EXT(sysreg_restore)@page	// Load TCR value from the system register restore structure
	add	x0, x0, EXT(sysreg_restore)@pageoff
	ldr	x1, [x0, SR_RESTORE_TCR_EL1]
	MSR_TCR_EL1_X1

	/* Set up translation table base registers.
	 *	TTBR0 - V=P table @ top of kernel
	 *	TTBR1 - KVA table @ top of kernel + 1 page
	 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Note that for KTRR configurations, the V=P map will be modified by
	 * arm_vm_init.c.
	 */
#endif
	and	x0, x25, #(TTBR_BADDR_MASK)
	mov	x19, lr
	bl	EXT(set_mmu_ttb)
	mov	lr, x19
	add	x0, x25, PGBYTES
	and	x0, x0, #(TTBR_BADDR_MASK)
	MSR_TTBR1_EL1_X0

	// Set up MAIR attr0 for normal memory, attr1 for device memory
	mov	x0, xzr
	mov	x1, #(MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_INNERWRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_INNERWRITEBACK))
	orr	x0, x0, x1
	mov	x1, #(MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU))
	orr	x0, x0, x1
	mov	x1, #(MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_REORDERED))
	orr	x0, x0, x1
	mov	x1, #(MAIR_POSTED_COMBINED_REORDERED << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED))
	orr	x0, x0, x1
	msr	MAIR_EL1, x0
	isb
	tlbi	vmalle1				// Flush stale EL1 translations before enabling the MMU
	dsb	ish


#if defined(BCM2837)
	// Setup timer interrupt routing; must be done before MMU is enabled
	mrs	x15, MPIDR_EL1			// Load MPIDR to get CPU number
	and	x15, x15, #0xFF			// CPU number is in MPIDR Affinity Level 0
	mov	x0, #0x4000
	lsl	x0, x0, #16
	add	x0, x0, #0x0040			// x0: 0x4000004X Core Timers interrupt control
	add	x0, x0, x15, lsl #2
	mov	w1, #0xF0			// x1: 0xF0 Route to Core FIQs
	str	w1, [x0]
	isb	sy
#endif

#ifndef __ARM_IC_NOALIAS_ICACHE__
	/* Invalidate the TLB and icache on systems that do not guarantee that the
	 * caches are invalidated on reset.
	 */
	tlbi	vmalle1
	ic	iallu
#endif

	/* If x21 is not 0, then this is either the start_cpu path or
	 * the resume_idle_cpu path.  cpu_ttep should already be
	 * populated, so just switch to the kernel_pmap now.
	 */

	cbz	x21, 1f
	adrp	x0, EXT(cpu_ttep)@page
	add	x0, x0, EXT(cpu_ttep)@pageoff
	ldr	x0, [x0]
	MSR_TTBR1_EL1_X0
1:

	// Set up the exception vectors
#if __ARM_KERNEL_PROTECT__
	/* If this is not the first reset of the boot CPU, the alternate mapping
	 * for the exception vectors will be set up, so use it.  Otherwise, we
	 * should use the mapping located in the kernelcache mapping.
	 */
	MOV64	x0, ARM_KERNEL_PROTECT_EXCEPTION_START

	cbnz	x21, 1f
#endif /* __ARM_KERNEL_PROTECT__ */
	adrp	x0, EXT(ExceptionVectorsBase)@page	// Load exception vectors base address
	add	x0, x0, EXT(ExceptionVectorsBase)@pageoff
	add	x0, x0, x22				// Convert exception vector address to KVA
	sub	x0, x0, x23
1:
	MSR_VBAR_EL1_X0

#if HAS_APPLE_PAC
	PAC_INIT_KEY_STATE	tmp=x0, tmp2=x1
#endif /* HAS_APPLE_PAC */

	// Enable caches, MMU, ROP and JOP
	MOV64	x0, SCTLR_EL1_DEFAULT
	MSR_SCTLR_EL1_X0
	isb	sy

	// Verify the expected SCTLR value took effect; spin otherwise.
	// NOTE(review): on non-KTRR builds x0 is unchanged by MSR_SCTLR_EL1_X0, so
	// this compare is trivially true; on KTRR builds pinst_set_sctlr presumably
	// returns the live value in x0 — confirm against pinst implementation.
	MOV64	x1, SCTLR_EL1_DEFAULT
	cmp	x0, x1
	bne	.

#if (!CONFIG_KERNEL_INTEGRITY || (CONFIG_KERNEL_INTEGRITY && !defined(KERNEL_INTEGRITY_WT)))
	/* Watchtower
	 *
	 * If we have a Watchtower monitor it will setup CPACR_EL1 for us, touching
	 * it here would trap to EL3.
	 */

	// Enable NEON
	mov	x0, #(CPACR_FPEN_ENABLE)
	msr	CPACR_EL1, x0
#endif

	// Clear thread pointer
	msr	TPIDR_EL1, xzr			// Set thread register


#if defined(APPLE_ARM64_ARCH_FAMILY)
	mrs	x12, MDSCR_EL1
	orr	x12, x12, MDSCR_TDCC
	msr	MDSCR_EL1, x12
	// Initialization common to all non-virtual Apple targets
#endif // APPLE_ARM64_ARCH_FAMILY

	// Read MIDR before start of per-SoC tunables
	mrs	x12, MIDR_EL1

	APPLY_TUNABLES	x12, x13, x14


#if HAS_CLUSTER
	// Unmask external IRQs if we're restarting from non-retention WFI
	mrs	x9, CPU_OVRD
	and	x9, x9, #(~(ARM64_REG_CYC_OVRD_irq_mask | ARM64_REG_CYC_OVRD_fiq_mask))
	msr	CPU_OVRD, x9
#endif

	// If x21 != 0, we're doing a warm reset, so we need to trampoline to the kernel pmap.
	cbnz	x21, Ltrampoline

	// Set KVA of boot args as first arg
	add	x0, x20, x22
	sub	x0, x0, x23

#if KASAN
	mov	x20, x0
	mov	x21, lr

	// x0: boot args
	// x1: KVA page table phys base
	mrs	x1, TTBR1_EL1
	bl	EXT(kasan_bootstrap)

	mov	x0, x20
	mov	lr, x21
#endif

	// Return to arm_init()
	ret

Ltrampoline:
	// Load VA of the trampoline
	adrp	x0, arm_init_tramp@page
	add	x0, x0, arm_init_tramp@pageoff
	add	x0, x0, x22
	sub	x0, x0, x23

	// Branch to the trampoline
	br	x0

/*
 * V=P to KVA trampoline.
 *	x0 - KVA of cpu data pointer
 */
	.text
	.align 2
arm_init_tramp:
	/* On a warm boot, the full kernel translation table is initialized in
	 * addition to the bootstrap tables. The layout is as follows:
	 *
	 *  +--Top of Memory--+
	 *         ...
	 *  |                 |
	 *  |  Primary Kernel |
	 *  |   Trans. Table  |
	 *  |                 |
	 *  +--Top + 5 pages--+
	 *  |                 |
	 *  |  Invalid Table  |
	 *  |                 |
	 *  +--Top + 4 pages--+
	 *  |                 |
	 *  |    KVA Table    |
	 *  |                 |
	 *  +--Top + 2 pages--+
	 *  |                 |
	 *  |    V=P Table    |
	 *  |                 |
	 *  +--Top of Kernel--+
	 *  |                 |
	 *  |  Kernel Mach-O  |
	 *  |                 |
	 *         ...
	 *  +---Kernel Base---+
	 */


	mov	x19, lr
	// Convert CPU data PA (x21) to VA and set as first argument
	mov	x0, x21
	bl	EXT(phystokv)

	mov	lr, x19

	/* Return to arm_init() */
	ret

//#include "globals_asm.h"

/* vim: set ts=4: */