/*
 * Copyright (c) 2007-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/* CMU_ENDHIST */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */

/*
 * Processor registers for ARM/ARM64
 */
#ifndef _ARM64_PROC_REG_H_
#define _ARM64_PROC_REG_H_

#if defined (__arm64__)
#include <pexpert/arm64/board_config.h>
#elif defined (__arm__)
#include <pexpert/arm/board_config.h>
#endif

/*
 * Processor registers for ARM
 */
#if __ARM_42BIT_PA_SPACE__
/* For now, force the issue! */
/* We need more VA space for the identity map to bootstrap the MMU */
#undef __ARM64_PMAP_SUBPAGE_L1__
#endif /* __ARM_42BIT_PA_SPACE__ */

/* For arm platforms, create one pset per cluster */
#define MAX_PSETS MAX_CPU_CLUSTERS
/*
 * The clutch scheduler is enabled only on non-AMP platforms for now.
 */
#if CONFIG_CLUTCH

#if __ARM_AMP__

/* Enable the Edge scheduler for all AS Mac platforms */
#if XNU_TARGET_OS_OSX
#define CONFIG_SCHED_CLUTCH 1
#define CONFIG_SCHED_EDGE 1
#endif /* XNU_TARGET_OS_OSX */

#else /* __ARM_AMP__ */
#define CONFIG_SCHED_CLUTCH 1
#endif /* __ARM_AMP__ */

#endif /* CONFIG_CLUTCH */

/* Thread groups are enabled on all ARM platforms (irrespective of scheduler) */
#define CONFIG_THREAD_GROUPS 1

#ifdef XNU_KERNEL_PRIVATE

#if __ARM_VFP__
#define ARM_VFP_DEBUG 0
#endif /* __ARM_VFP__ */

#endif /* XNU_KERNEL_PRIVATE */

/*
 * Program Status Registers
 *
 * CPSR: Current Program Status Register
 * SPSR: Saved Program Status Registers
 *
 *  31 30 29 28 27     24     19   16      9  8  7  6  5  4   0
 * +-----------------------------------------------------------+
 * | N| Z| C| V| Q|...| J|...|GE[3:0]|...| E| A| I| F| T| MODE |
 * +-----------------------------------------------------------+
 */

/*
 * Flags
 */
#define PSR_NF 0x80000000 /* Negative/Less than */
#define PSR_ZF 0x40000000 /* Zero */
#define PSR_CF 0x20000000 /* Carry/Borrow/Extend */
#define PSR_VF 0x10000000 /* Overflow */

/*
 * Modified execution mode flags
 */
#define PSR_TF 0x00000020 /* Thumb flag (BX ARMv4T) */

/*
 * CPU mode
 */
#define PSR_USER_MODE 0x00000010 /* User mode */

#define PSR_MODE_MASK 0x0000001F
#define PSR_IS_KERNEL(psr) (((psr) & PSR_MODE_MASK) != PSR_USER_MODE)
#define PSR_IS_USER(psr) (((psr) & PSR_MODE_MASK) == PSR_USER_MODE)

#define PSR_USERDFLT PSR_USER_MODE
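/*
 * Illustrative sketch (an assumption, not code from this header): classifying
 * a saved 32-bit CPSR/SPSR value with the mode macros above; 'spsr' is a
 * hypothetical value from an exception frame.
 *
 *	if (PSR_IS_USER(spsr)) {
 *		// exception taken from user mode
 *	} else {
 *		// exception taken from a privileged mode (PSR_IS_KERNEL(spsr))
 *	}
 */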
/*
 * Cache configuration
 */

#if defined (APPLETYPHOON)

/* I-Cache */
#define MMU_I_CLINE 6 /* cache line size is 1<<MMU_I_CLINE (64) */

/* D-Cache */
#define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */

#elif defined (APPLETWISTER)

/* I-Cache */
#define MMU_I_CLINE 6 /* cache line size is 1<<MMU_I_CLINE (64) */

/* D-Cache */
#define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */

#elif defined (APPLEHURRICANE)

/* I-Cache */
#define MMU_I_CLINE 6 /* cache line size is 1<<MMU_I_CLINE (64) */

/* D-Cache */
#define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */

#elif defined (APPLEMONSOON)

/* I-Cache, 96KB for Monsoon, 48KB for Mistral, 6-way. */
#define MMU_I_CLINE 6 /* cache line size is 1<<MMU_I_CLINE (64) */

/* D-Cache, 64KB for Monsoon, 32KB for Mistral, 4-way. */
#define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */

#elif defined (APPLEVORTEX)

/* I-Cache, 128KB 8-way for Vortex, 48KB 6-way for Tempest. */
#define MMU_I_CLINE 6 /* cache line size is 1<<MMU_I_CLINE (64) */

/* D-Cache, 128KB 8-way for Vortex, 32KB 4-way for Tempest. */
#define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */

#elif defined (APPLELIGHTNING)

/* I-Cache, 192KB for Lightning, 96KB for Thunder, 6-way. */
#define MMU_I_CLINE 6 /* cache line size is 1<<MMU_I_CLINE (64) */

/* D-Cache, 128KB for Lightning, 8-way. 48KB for Thunder, 6-way. */
#define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */

#elif defined (APPLEFIRESTORM)

/* I-Cache, 256KB for Firestorm, 128KB for Icestorm, 6-way. */
#define MMU_I_CLINE 6 /* cache line size is 1<<MMU_I_CLINE (64) */

/* D-Cache, 160KB for Firestorm, 8-way. 64KB for Icestorm, 6-way. */
#define MMU_CLINE 6 /* cache line size is 1<<MMU_CLINE (64) */

#elif defined (BCM2837) /* Raspberry Pi 3 */

/* I-Cache. We don't have a detailed spec, so we just follow the ARM technical reference. */
#define MMU_I_CLINE 6

/* D-Cache. */
#define MMU_CLINE 6

#elif defined (VMAPPLE)

/* I-Cache. */
#define MMU_I_CLINE 6

/* D-Cache. */
#define MMU_CLINE 6

#else
#error processor not supported
#endif

#define MAX_L2_CLINE_BYTES (1 << MAX_L2_CLINE)
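/*
 * Illustrative sketch (an assumption, not code from this header): turning the
 * cache line shifts above into byte sizes and alignment; 'addr' is
 * hypothetical.
 *
 *	uint64_t dcache_line_bytes = 1ULL << MMU_CLINE;                 // 64
 *	uint64_t line_base = addr & ~((1ULL << MMU_CLINE) - 1);         // round down to a D-cache line
 */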
/*
 * Format of the Debug Breakpoint and Watchpoint Value and Control Registers
 * (BVR/BCR, WVR/WCR)
 */
#define ARM_DBG_VR_ADDRESS_MASK 0xFFFFFFFC /* BVR & WVR */
#define ARM_DBG_VR_ADDRESS_MASK64 0xFFFFFFFFFFFFFFFCull /* BVR & WVR */

#define ARM_DBG_CR_ADDRESS_MASK_MASK 0x1F000000 /* BCR & WCR */
#define ARM_DBGBCR_MATCH_MASK (1 << 22) /* BCR only */
#define ARM_DBGBCR_TYPE_MASK (1 << 21) /* BCR only */
#define ARM_DBGBCR_TYPE_IVA (0 << 21)
#define ARM_DBG_CR_LINKED_MASK (1 << 20) /* BCR & WCR */
#define ARM_DBG_CR_LINKED_UNLINKED (0 << 20)
#define ARM_DBG_CR_SECURITY_STATE_BOTH (0 << 14)
#define ARM_DBG_CR_HIGHER_MODE_ENABLE (1 << 13)
#define ARM_DBGWCR_BYTE_ADDRESS_SELECT_MASK 0x00001FE0 /* WCR only */
#define ARM_DBG_CR_BYTE_ADDRESS_SELECT_MASK 0x000001E0 /* BCR & WCR */
#define ARM_DBGWCR_ACCESS_CONTROL_MASK (3 << 3) /* WCR only */
#define ARM_DBG_CR_MODE_CONTROL_PRIVILEGED (1 << 1) /* BCR & WCR */
#define ARM_DBG_CR_MODE_CONTROL_USER (2 << 1) /* BCR & WCR */
#define ARM_DBG_CR_ENABLE_MASK (1 << 0) /* BCR & WCR */
#define ARM_DBG_CR_ENABLE_ENABLE (1 << 0)

/*
 * Format of the OS Lock Access (DBGOSLAR) and Lock Access Registers (DBGLAR)
 */
#define ARM_DBG_LOCK_ACCESS_KEY 0xC5ACCE55

/* ARM Debug registers of interest */
#define ARM_DEBUG_OFFSET_DBGPRCR (0x310)
#define ARM_DEBUG_OFFSET_DBGLAR (0xFB0)

/*
 * Main ID Register (MIDR)
 *
 *  31 24 23 20 19  16 15   4 3   0
 * +-----+-----+------+------+-----+
 * | IMP | VAR | ARCH | PNUM | REV |
 * +-----+-----+------+------+-----+
 *
 * where:
 *  IMP:  Implementor code
 *  VAR:  Variant number
 *  ARCH: Architecture code
 *  PNUM: Primary part number
 *  REV:  Minor revision number
 */
#define MIDR_REV_SHIFT 0
#define MIDR_REV_MASK (0xf << MIDR_REV_SHIFT)
#define MIDR_VAR_SHIFT 20
#define MIDR_VAR_MASK (0xf << MIDR_VAR_SHIFT)
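/*
 * Illustrative sketch (an assumption, not code from this header): extracting
 * the variant and revision fields from a MIDR value with the masks above;
 * 'midr' is hypothetical (e.g. read with MRS from MIDR_EL1).
 *
 *	uint32_t variant = (midr & MIDR_VAR_MASK) >> MIDR_VAR_SHIFT;
 *	uint32_t revision = (midr & MIDR_REV_MASK) >> MIDR_REV_SHIFT;
 */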
#if __ARM_KERNEL_PROTECT__
/*
 * __ARM_KERNEL_PROTECT__ is a feature intended to guard against potential
 * architectural or microarchitectural vulnerabilities that could allow cores to
 * read/access EL1-only mappings while in EL0 mode. This is achieved by
 * removing as many mappings as possible when the core transitions to EL0 mode
 * from EL1 mode, and restoring those mappings when the core transitions to EL1
 * mode from EL0 mode.
 *
 * At the moment, this is achieved through use of ASIDs and TCR_EL1. TCR_EL1 is
 * used to map and unmap the ordinary kernel mappings, by contracting and
 * expanding translation zone size for TTBR1 when exiting and entering EL1,
 * respectively:
 *
 * Kernel EL0 Mappings: TTBR1 mappings that must remain mapped while the core
 * is in EL0.
 * Kernel EL1 Mappings: TTBR1 mappings that must be mapped while the core is in
 * EL1.
 *
 * T1SZ_USER: T1SZ_BOOT + 1
 * TTBR1_EL1_BASE_BOOT: (2^64) - (2^(64 - T1SZ_BOOT))
 * TTBR1_EL1_BASE_USER: (2^64) - (2^(64 - T1SZ_USER))
 * TTBR1_EL1_MAX: (2^64) - 1
 *
 * When in EL1, we program TCR_EL1 (specifically, TCR_EL1.T1SZ) to give the
 * following TTBR1 layout:
 *
 *  TTBR1_EL1_BASE_BOOT   TTBR1_EL1_BASE_USER   TTBR1_EL1_MAX
 * +---------------------------------------------------------+
 * | Kernel EL0 Mappings |        Kernel EL1 Mappings        |
 * +---------------------------------------------------------+
 *
 * And when in EL0, we program TCR_EL1 to give the following TTBR1 layout:
 *
 *  TTBR1_EL1_BASE_USER                         TTBR1_EL1_MAX
 * +---------------------------------------------------------+
 * |                   Kernel EL0 Mappings                   |
 * +---------------------------------------------------------+
 *
 * With the current implementation, both the EL0 and EL1 mappings for the kernel
 * use otherwise empty translation tables for mapping the exception vectors (so
 * that we do not need to TLB flush the exception vector address when switching
 * between EL0 and EL1). The rationale here is that the TLBI would require a
 * DSB, and DSBs can be extremely expensive.
 *
 * Each pmap is given two ASIDs: (n & ~1) as an EL0 ASID, and (n | 1) as an EL1
 * ASID. The core switches between ASIDs on EL transitions, so that the TLB
 * does not need to be fully invalidated on an EL transition.
 *
 * Most kernel mappings will be marked non-global in this configuration, as
 * global mappings would be visible to userspace unless we invalidate them on
 * eret.
 */
#if XNU_MONITOR
/*
 * Please note that because we indirect through the thread register in order to
 * locate the kernel, and because we unmap most of the kernel, the security
 * model of the PPL is undermined by __ARM_KERNEL_PROTECT__, as we rely on
 * kernel-controlled data to direct codeflow in the exception vectors.
 *
 * If we want to ship XNU_MONITOR paired with __ARM_KERNEL_PROTECT__, we will
 * need to find a performant solution to this problem.
 */
#endif
#endif /* __ARM_KERNEL_PROTECT__ */
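/*
 * Worked example (a sketch of the formulas above, not kernel code): the TTBR1
 * region base for a given T1SZ is (2^64) - (2^(64 - T1SZ)), i.e.
 *
 *	uint64_t ttbr1_base(unsigned t1sz)
 *	{
 *		return ~((1ULL << (64 - t1sz)) - 1);   // == 2^64 - 2^(64 - t1sz)
 *	}
 *
 * Because T1SZ_USER = T1SZ_BOOT + 1, entering EL0 halves the TTBR1 span and
 * raises the base, which is what hides the kernel EL1 mappings from EL0.
 */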
479 */ 480 #define DAIFSC_DEBUGF (1 << 3) 481 #define DAIFSC_ASYNCF (1 << 2) 482 #define DAIFSC_IRQF (1 << 1) 483 #define DAIFSC_FIQF (1 << 0) 484 #define DAIFSC_ALL (DAIFSC_DEBUGF | DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) 485 #define DAIFSC_STANDARD_DISABLE (DAIFSC_ASYNCF | DAIFSC_IRQF | DAIFSC_FIQF) 486 #define DAIFSC_NOASYNC (DAIFSC_DEBUGF | DAIFSC_IRQF | DAIFSC_FIQF) 487 488 /* 489 * ARM64_TODO: unify with ARM? 490 */ 491 #define PSR64_CF 0x20000000 /* Carry/Borrow/Extend */ 492 493 #define PSR64_MODE_MASK 0x1F 494 495 #define PSR64_USER_MASK PSR64_NZCV_MASK 496 497 #define PSR64_MODE_USER32_THUMB 0x20 498 499 #define PSR64_MODE_RW_SHIFT 4 500 #define PSR64_MODE_RW_64 0 501 #define PSR64_MODE_RW_32 (0x1 << PSR64_MODE_RW_SHIFT) 502 503 #define PSR64_MODE_EL_SHIFT 2 504 #define PSR64_MODE_EL_MASK (0x3 << PSR64_MODE_EL_SHIFT) 505 #define PSR64_MODE_EL3 (0x3 << PSR64_MODE_EL_SHIFT) 506 #define PSR64_MODE_EL2 (0x2 << PSR64_MODE_EL_SHIFT) 507 #define PSR64_MODE_EL1 (0x1 << PSR64_MODE_EL_SHIFT) 508 #define PSR64_MODE_EL0 0 509 510 #define PSR64_MODE_SPX 0x1 511 #define PSR64_MODE_SP0 0 512 513 #define PSR64_USER32_DEFAULT (PSR64_MODE_RW_32 | PSR64_MODE_EL0 | PSR64_MODE_SP0 | PSR64_SSBS_U32_DEFAULT) 514 #define PSR64_USER64_DEFAULT (PSR64_MODE_RW_64 | PSR64_MODE_EL0 | PSR64_MODE_SP0 | PSR64_SSBS_U64_DEFAULT) 515 #define PSR64_KERNEL_STANDARD (DAIF_STANDARD_DISABLE | PSR64_MODE_RW_64 | PSR64_MODE_EL1 | PSR64_MODE_SP0 | PSR64_SSBS_KRN_DEFAULT) 516 #if __ARM_PAN_AVAILABLE__ 517 #define PSR64_KERNEL_DEFAULT (PSR64_KERNEL_STANDARD | PSR64_PAN) 518 #else 519 #define PSR64_KERNEL_DEFAULT PSR64_KERNEL_STANDARD 520 #endif 521 522 #define PSR64_IS_KERNEL(x) ((x & PSR64_MODE_EL_MASK) > PSR64_MODE_EL0) 523 #define PSR64_IS_USER(x) ((x & PSR64_MODE_EL_MASK) == PSR64_MODE_EL0) 524 525 #define PSR64_IS_USER32(x) (PSR64_IS_USER(x) && (x & PSR64_MODE_RW_32)) 526 #define PSR64_IS_USER64(x) (PSR64_IS_USER(x) && !(x & PSR64_MODE_RW_32)) 527 528 529 530 /* 531 * System Control Register (SCTLR) 532 */ 533 534 #define SCTLR_DSSBS (1ULL << 44) 535 536 #define SCTLR_RESERVED ((3ULL << 28) | (1ULL << 20)) 537 #if defined(HAS_APPLE_PAC) 538 539 // 31 PACIA_ENABLED AddPACIA and AuthIA functions enabled 540 #define SCTLR_PACIA_ENABLED_SHIFT 31 541 #define SCTLR_PACIA_ENABLED (1ULL << SCTLR_PACIA_ENABLED_SHIFT) 542 // 30 PACIB_ENABLED AddPACIB and AuthIB functions enabled 543 #define SCTLR_PACIB_ENABLED (1ULL << 30) 544 // 29:28 RES1 11 545 // 27 PACDA_ENABLED AddPACDA and AuthDA functions enabled 546 #define SCTLR_PACDA_ENABLED (1ULL << 27) 547 // 13 PACDB_ENABLED AddPACDB and AuthDB functions enabled 548 #define SCTLR_PACDB_ENABLED (1ULL << 13) 549 550 #define SCTLR_JOP_KEYS_ENABLED (SCTLR_PACIA_ENABLED | SCTLR_PACDA_ENABLED | SCTLR_PACDB_ENABLED) 551 #endif /* defined(HAS_APPLE_PAC) */ 552 553 // 26 UCI User Cache Instructions 554 #define SCTLR_UCI_ENABLED (1ULL << 26) 555 556 // 25 EE Exception Endianness 557 #define SCTLR_EE_BIG_ENDIAN (1ULL << 25) 558 559 // 24 E0E EL0 Endianness 560 #define SCTLR_E0E_BIG_ENDIAN (1ULL << 24) 561 562 // 23 SPAN Set PAN 563 #define SCTLR_PAN_UNCHANGED (1ULL << 23) 564 565 // 22 EIS Taking an exception is a context synchronization event 566 #define SCTLR_EIS (1ULL << 22) 567 568 // 21 RES0 0 569 // 20 RES1 1 570 571 // 19 WXN Writeable implies eXecute Never 572 #define SCTLR_WXN_ENABLED (1ULL << 19) 573 574 // 18 nTWE Not trap WFE from EL0 575 #define SCTLR_nTWE_WFE_ENABLED (1ULL << 18) 576 577 // 17 RES0 0 578 579 // 16 nTWI Not trap WFI from EL0 580 #define SCTRL_nTWI_WFI_ENABLED 
/*
 * ARM64_TODO: unify with ARM?
 */
#define PSR64_CF 0x20000000 /* Carry/Borrow/Extend */

#define PSR64_MODE_MASK 0x1F

#define PSR64_USER_MASK PSR64_NZCV_MASK

#define PSR64_MODE_USER32_THUMB 0x20

#define PSR64_MODE_RW_SHIFT 4
#define PSR64_MODE_RW_64 0
#define PSR64_MODE_RW_32 (0x1 << PSR64_MODE_RW_SHIFT)

#define PSR64_MODE_EL_SHIFT 2
#define PSR64_MODE_EL_MASK (0x3 << PSR64_MODE_EL_SHIFT)
#define PSR64_MODE_EL3 (0x3 << PSR64_MODE_EL_SHIFT)
#define PSR64_MODE_EL2 (0x2 << PSR64_MODE_EL_SHIFT)
#define PSR64_MODE_EL1 (0x1 << PSR64_MODE_EL_SHIFT)
#define PSR64_MODE_EL0 0

#define PSR64_MODE_SPX 0x1
#define PSR64_MODE_SP0 0

#define PSR64_USER32_DEFAULT (PSR64_MODE_RW_32 | PSR64_MODE_EL0 | PSR64_MODE_SP0 | PSR64_SSBS_U32_DEFAULT)
#define PSR64_USER64_DEFAULT (PSR64_MODE_RW_64 | PSR64_MODE_EL0 | PSR64_MODE_SP0 | PSR64_SSBS_U64_DEFAULT)
#define PSR64_KERNEL_STANDARD (DAIF_STANDARD_DISABLE | PSR64_MODE_RW_64 | PSR64_MODE_EL1 | PSR64_MODE_SP0 | PSR64_SSBS_KRN_DEFAULT)
#if __ARM_PAN_AVAILABLE__
#define PSR64_KERNEL_DEFAULT (PSR64_KERNEL_STANDARD | PSR64_PAN)
#else
#define PSR64_KERNEL_DEFAULT PSR64_KERNEL_STANDARD
#endif

#define PSR64_IS_KERNEL(x) ((x & PSR64_MODE_EL_MASK) > PSR64_MODE_EL0)
#define PSR64_IS_USER(x) ((x & PSR64_MODE_EL_MASK) == PSR64_MODE_EL0)

#define PSR64_IS_USER32(x) (PSR64_IS_USER(x) && (x & PSR64_MODE_RW_32))
#define PSR64_IS_USER64(x) (PSR64_IS_USER(x) && !(x & PSR64_MODE_RW_32))
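/*
 * Illustrative sketch (an assumption, not code from this header): classifying
 * a saved SPSR value from an arm64 exception frame with the mode macros above;
 * 'spsr' is hypothetical.
 *
 *	if (PSR64_IS_USER32(spsr)) {
 *		// exception taken from a 32-bit EL0 context
 *	} else if (PSR64_IS_USER64(spsr)) {
 *		// exception taken from a 64-bit EL0 context
 *	} else {
 *		// exception taken from EL1 (PSR64_IS_KERNEL(spsr))
 *	}
 */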
/*
 * System Control Register (SCTLR)
 */

#define SCTLR_DSSBS (1ULL << 44)

#define SCTLR_RESERVED ((3ULL << 28) | (1ULL << 20))
#if defined(HAS_APPLE_PAC)

// 31    PACIA_ENABLED AddPACIA and AuthIA functions enabled
#define SCTLR_PACIA_ENABLED_SHIFT 31
#define SCTLR_PACIA_ENABLED (1ULL << SCTLR_PACIA_ENABLED_SHIFT)
// 30    PACIB_ENABLED AddPACIB and AuthIB functions enabled
#define SCTLR_PACIB_ENABLED (1ULL << 30)
// 29:28 RES1 11
// 27    PACDA_ENABLED AddPACDA and AuthDA functions enabled
#define SCTLR_PACDA_ENABLED (1ULL << 27)
// 13    PACDB_ENABLED AddPACDB and AuthDB functions enabled
#define SCTLR_PACDB_ENABLED (1ULL << 13)

#define SCTLR_JOP_KEYS_ENABLED (SCTLR_PACIA_ENABLED | SCTLR_PACDA_ENABLED | SCTLR_PACDB_ENABLED)
#endif /* defined(HAS_APPLE_PAC) */

// 26 UCI User Cache Instructions
#define SCTLR_UCI_ENABLED (1ULL << 26)

// 25 EE Exception Endianness
#define SCTLR_EE_BIG_ENDIAN (1ULL << 25)

// 24 E0E EL0 Endianness
#define SCTLR_E0E_BIG_ENDIAN (1ULL << 24)

// 23 SPAN Set PAN
#define SCTLR_PAN_UNCHANGED (1ULL << 23)

// 22 EIS Taking an exception is a context synchronization event
#define SCTLR_EIS (1ULL << 22)

// 21 RES0 0
// 20 RES1 1

// 19 WXN Writeable implies eXecute Never
#define SCTLR_WXN_ENABLED (1ULL << 19)

// 18 nTWE Not trap WFE from EL0
#define SCTLR_nTWE_WFE_ENABLED (1ULL << 18)

// 17 RES0 0

// 16 nTWI Not trap WFI from EL0
#define SCTRL_nTWI_WFI_ENABLED (1ULL << 16)

// 15 UCT User Cache Type register (CTR_EL0)
#define SCTLR_UCT_ENABLED (1ULL << 15)

// 14 DZE User Data Cache Zero (DC ZVA)
#define SCTLR_DZE_ENABLED (1ULL << 14)

// 12 I Instruction cache enable
#define SCTLR_I_ENABLED (1ULL << 12)

// 11 EOS Exception return is a context synchronization event
#define SCTLR_EOS (1ULL << 11)

// 10 EnRCTX EL0 Access to FEAT_SPECRES speculation restriction instructions
#define SCTLR_EnRCTX (1ULL << 10)

// 9 UMA User Mask Access
#define SCTLR_UMA_ENABLED (1ULL << 9)

// 8 SED SETEND Disable
#define SCTLR_SED_DISABLED (1ULL << 8)

// 7 ITD IT Disable
#define SCTLR_ITD_DISABLED (1ULL << 7)

// 6 RES0 0

// 5 CP15BEN CP15 Barrier ENable
#define SCTLR_CP15BEN_ENABLED (1ULL << 5)

// 4 SA0 Stack Alignment check for EL0
#define SCTLR_SA0_ENABLED (1ULL << 4)

// 3 SA Stack Alignment check
#define SCTLR_SA_ENABLED (1ULL << 3)

// 2 C Cache enable
#define SCTLR_C_ENABLED (1ULL << 2)

// 1 A Alignment check
#define SCTLR_A_ENABLED (1ULL << 1)

// 0 M MMU enable
#define SCTLR_M_ENABLED (1ULL << 0)

#if __ARM_ARCH_8_5__
#define SCTLR_CSEH_DEFAULT (0)
#define SCTLR_DSSBS_DEFAULT SCTLR_DSSBS
#else
#define SCTLR_CSEH_DEFAULT (SCTLR_EIS | SCTLR_EOS)
#define SCTLR_DSSBS_DEFAULT (0)
#endif

#if HAS_APPLE_PAC
#define SCTLR_ROP_KEYS_DEFAULT SCTLR_PACIB_ENABLED /* IB is ROP */
#else /* !HAS_APPLE_PAC */
#define SCTLR_ROP_KEYS_DEFAULT 0
#endif /* HAS_APPLE_PAC */

#if HAS_APPLE_PAC
#define SCTLR_JOP_KEYS_DEFAULT SCTLR_JOP_KEYS_ENABLED
#else /* !HAS_APPLE_PAC */
#define SCTLR_JOP_KEYS_DEFAULT 0
#endif

#define SCTLR_EL1_DEFAULT \
	(SCTLR_RESERVED | SCTLR_UCI_ENABLED | SCTLR_nTWE_WFE_ENABLED | SCTLR_DZE_ENABLED | \
	SCTLR_I_ENABLED | SCTLR_SED_DISABLED | SCTLR_CP15BEN_ENABLED | \
	SCTLR_SA0_ENABLED | SCTLR_SA_ENABLED | SCTLR_C_ENABLED | SCTLR_M_ENABLED | \
	SCTLR_CSEH_DEFAULT | SCTLR_DSSBS_DEFAULT | \
	SCTLR_ROP_KEYS_DEFAULT | SCTLR_JOP_KEYS_DEFAULT)

/*
 * Coprocessor Access Control Register (CPACR)
 *
 *  31  28 27    22 21  20 19                 0
 * +---+---+------+------+--------------------+
 * |000|TTA|000000| FPEN |00000000000000000000|
 * +---+---+------+------+--------------------+
 *
 * where:
 *  TTA:  Trace trap
 *  FPEN: Floating point enable
 */
#define CPACR_TTA_SHIFT 28
#define CPACR_TTA (1 << CPACR_TTA_SHIFT)

#define CPACR_FPEN_SHIFT 20
#define CPACR_FPEN_EL0_TRAP (0x1 << CPACR_FPEN_SHIFT)
#define CPACR_FPEN_ENABLE (0x3 << CPACR_FPEN_SHIFT)
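/*
 * Illustrative sketch (an assumption, not code from this header): enabling
 * FP/SIMD without traps at EL0 and EL1 by programming CPACR_EL1.FPEN; 'cpacr'
 * is a hypothetical copy of the register.
 *
 *	cpacr = (cpacr & ~CPACR_FPEN_ENABLE) | CPACR_FPEN_ENABLE;   // FPEN = 0b11
 */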
/*
 * FPSR: Floating Point Status Register
 *
 *  31 30 29 28 27 26                7  6  4   3   2   1   0
 * +--+--+--+--+--+-------------------+---+--+---+---+---+---+---+
 * | N| Z| C| V|QC|0000000000000000000|IDC|00|IXC|UFC|OFC|DZC|IOC|
 * +--+--+--+--+--+-------------------+---+--+---+---+---+---+---+
 */

#define FPSR_N_SHIFT 31
#define FPSR_Z_SHIFT 30
#define FPSR_C_SHIFT 29
#define FPSR_V_SHIFT 28
#define FPSR_QC_SHIFT 27
#define FPSR_IDC_SHIFT 7
#define FPSR_IXC_SHIFT 4
#define FPSR_UFC_SHIFT 3
#define FPSR_OFC_SHIFT 2
#define FPSR_DZC_SHIFT 1
#define FPSR_IOC_SHIFT 0
#define FPSR_N (1 << FPSR_N_SHIFT)
#define FPSR_Z (1 << FPSR_Z_SHIFT)
#define FPSR_C (1 << FPSR_C_SHIFT)
#define FPSR_V (1 << FPSR_V_SHIFT)
#define FPSR_QC (1 << FPSR_QC_SHIFT)
#define FPSR_IDC (1 << FPSR_IDC_SHIFT)
#define FPSR_IXC (1 << FPSR_IXC_SHIFT)
#define FPSR_UFC (1 << FPSR_UFC_SHIFT)
#define FPSR_OFC (1 << FPSR_OFC_SHIFT)
#define FPSR_DZC (1 << FPSR_DZC_SHIFT)
#define FPSR_IOC (1 << FPSR_IOC_SHIFT)

/*
 * A mask for all of the bits that are not RAZ for FPSR; this
 * is primarily for converting between a 32-bit view of NEON state
 * (FPSCR) and a 64-bit view of NEON state (FPSR, FPCR).
 */
#define FPSR_MASK \
	(FPSR_N | FPSR_Z | FPSR_C | FPSR_V | FPSR_QC | FPSR_IDC | FPSR_IXC | \
	FPSR_UFC | FPSR_OFC | FPSR_DZC | FPSR_IOC)

/*
 * FPCR: Floating Point Control Register
 *
 *  31    26 25 24 23 21   19 18 15 14 12 11 10  9   8  7      0
 * +-----+---+--+--+-----+------+--+---+---+--+---+---+---+---+---+--------+
 * |00000|AHP|DN|FZ|RMODE|STRIDE| 0|LEN|IDE|00|IXE|UFE|OFE|DZE|IOE|00000000|
 * +-----+---+--+--+-----+------+--+---+---+--+---+---+---+---+---+--------+
 */

#define FPCR_AHP_SHIFT 26
#define FPCR_DN_SHIFT 25
#define FPCR_FZ_SHIFT 24
#define FPCR_RMODE_SHIFT 22
#define FPCR_STRIDE_SHIFT 20
#define FPCR_LEN_SHIFT 16
#define FPCR_IDE_SHIFT 15
#define FPCR_IXE_SHIFT 12
#define FPCR_UFE_SHIFT 11
#define FPCR_OFE_SHIFT 10
#define FPCR_DZE_SHIFT 9
#define FPCR_IOE_SHIFT 8
#define FPCR_AHP (1 << FPCR_AHP_SHIFT)
#define FPCR_DN (1 << FPCR_DN_SHIFT)
#define FPCR_FZ (1 << FPCR_FZ_SHIFT)
#define FPCR_RMODE (0x3 << FPCR_RMODE_SHIFT)
#define FPCR_STRIDE (0x3 << FPCR_STRIDE_SHIFT)
#define FPCR_LEN (0x7 << FPCR_LEN_SHIFT)
#define FPCR_IDE (1 << FPCR_IDE_SHIFT)
#define FPCR_IXE (1 << FPCR_IXE_SHIFT)
#define FPCR_UFE (1 << FPCR_UFE_SHIFT)
#define FPCR_OFE (1 << FPCR_OFE_SHIFT)
#define FPCR_DZE (1 << FPCR_DZE_SHIFT)
#define FPCR_IOE (1 << FPCR_IOE_SHIFT)
#define FPCR_DEFAULT (0)
#define FPCR_DEFAULT_32 (FPCR_DN|FPCR_FZ)

/*
 * A mask for all of the bits that are not RAZ for FPCR; this
 * is primarily for converting between a 32-bit view of NEON state
 * (FPSCR) and a 64-bit view of NEON state (FPSR, FPCR).
 */
#define FPCR_MASK \
	(FPCR_AHP | FPCR_DN | FPCR_FZ | FPCR_RMODE | FPCR_STRIDE | FPCR_LEN | \
	FPCR_IDE | FPCR_IXE | FPCR_UFE | FPCR_OFE | FPCR_DZE | FPCR_IOE)
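/*
 * Illustrative sketch (an assumption, not code from this header): the two
 * masks support converting between the 32-bit NEON view (FPSCR) and the
 * 64-bit view (FPSR/FPCR); 'fpsr' and 'fpcr' are hypothetical.
 *
 *	uint32_t fpscr = (uint32_t)((fpsr & FPSR_MASK) | (fpcr & FPCR_MASK));
 *	uint64_t split_fpsr = fpscr & FPSR_MASK;   // status bits
 *	uint64_t split_fpcr = fpscr & FPCR_MASK;   // control bits
 */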
/*
 * Translation Control Register (TCR)
 *
 * Legacy:
 *
 *  63  39 38 37 36 34 32 30 29 28 27 26 25 24 23 22 21 16 14 13 12 11 10 9 8 7 5 0
 * +------+----+----+--+-+-----+-+---+-----+-----+-----+----+--+------+-+---+-----+-----+-----+----+-+----+
 * | zero |TBI1|TBI0|AS|z| IPS |z|TG1| SH1 |ORGN1|IRGN1|EPD1|A1| T1SZ |z|TG0| SH0 |ORGN0|IRGN0|EPD0|z|T0SZ|
 * +------+----+----+--+-+-----+-+---+-----+-----+-----+----+--+------+-+---+-----+-----+-----+----+-+----+
 *
 * Current (with 16KB granule support):
 *
 *  63  39 38 37 36 34 32 30 29 28 27 26 25 24 23 22 21 16 14 13 12 11 10 9 8 7 5 0
 * +------+----+----+--+-+-----+-----+-----+-----+-----+----+--+------+-----+-----+-----+-----+----+-+----+
 * | zero |TBI1|TBI0|AS|z| IPS | TG1 | SH1 |ORGN1|IRGN1|EPD1|A1| T1SZ | TG0 | SH0 |ORGN0|IRGN0|EPD0|z|T0SZ|
 * +------+----+----+--+-+-----+-----+-----+-----+-----+----+--+------+-----+-----+-----+-----+----+-+----+
 *
 * TBI1:  Top Byte Ignored for TTBR1 region
 * TBI0:  Top Byte Ignored for TTBR0 region
 * AS:    ASID Size
 * IPS:   Physical Address Size limit
 * TG1:   Granule Size for TTBR1 region
 * SH1:   Shareability for TTBR1 region
 * ORGN1: Outer Cacheability for TTBR1 region
 * IRGN1: Inner Cacheability for TTBR1 region
 * EPD1:  Translation table walk disable for TTBR1
 * A1:    ASID selection from TTBR1 enable
 * T1SZ:  Virtual address size for TTBR1
 * TG0:   Granule Size for TTBR0 region
 * SH0:   Shareability for TTBR0 region
 * ORGN0: Outer Cacheability for TTBR0 region
 * IRGN0: Inner Cacheability for TTBR0 region
 * EPD0:  Translation table walk disable for TTBR0
 * T0SZ:  Virtual address size for TTBR0
 */

#define TCR_T0SZ_SHIFT 0ULL
#define TCR_TSZ_BITS 6ULL
#define TCR_TSZ_MASK ((1ULL << TCR_TSZ_BITS) - 1ULL)

#define TCR_IRGN0_SHIFT 8ULL
#define TCR_IRGN0_DISABLED (0ULL << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WRITEBACK (1ULL << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WRITETHRU (2ULL << TCR_IRGN0_SHIFT)
#define TCR_IRGN0_WRITEBACKNO (3ULL << TCR_IRGN0_SHIFT)

#define TCR_ORGN0_SHIFT 10ULL
#define TCR_ORGN0_DISABLED (0ULL << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WRITEBACK (1ULL << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WRITETHRU (2ULL << TCR_ORGN0_SHIFT)
#define TCR_ORGN0_WRITEBACKNO (3ULL << TCR_ORGN0_SHIFT)

#define TCR_SH0_SHIFT 12ULL
#define TCR_SH0_NONE (0ULL << TCR_SH0_SHIFT)
#define TCR_SH0_OUTER (2ULL << TCR_SH0_SHIFT)
#define TCR_SH0_INNER (3ULL << TCR_SH0_SHIFT)

#define TCR_TG0_GRANULE_SHIFT (14ULL)
#define TCR_TG0_GRANULE_BITS (2ULL)
#define TCR_TG0_GRANULE_MASK ((1ULL << TCR_TG0_GRANULE_BITS) - 1ULL)

#define TCR_TG0_GRANULE_4KB (0ULL << TCR_TG0_GRANULE_SHIFT)
#define TCR_TG0_GRANULE_64KB (1ULL << TCR_TG0_GRANULE_SHIFT)
#define TCR_TG0_GRANULE_16KB (2ULL << TCR_TG0_GRANULE_SHIFT)

#if __ARM_16K_PG__
#define TCR_TG0_GRANULE_SIZE (TCR_TG0_GRANULE_16KB)
#else
#define TCR_TG0_GRANULE_SIZE (TCR_TG0_GRANULE_4KB)
#endif

#define TCR_T1SZ_SHIFT 16ULL

#define TCR_A1_ASID1 (1ULL << 22ULL)
#define TCR_EPD1_TTBR1_DISABLED (1ULL << 23ULL)

#define TCR_IRGN1_SHIFT 24ULL
#define TCR_IRGN1_DISABLED (0ULL << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WRITEBACK (1ULL << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WRITETHRU (2ULL << TCR_IRGN1_SHIFT)
#define TCR_IRGN1_WRITEBACKNO (3ULL << TCR_IRGN1_SHIFT)

#define TCR_ORGN1_SHIFT 26ULL
#define TCR_ORGN1_DISABLED (0ULL << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WRITEBACK (1ULL << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WRITETHRU (2ULL << TCR_ORGN1_SHIFT)
#define TCR_ORGN1_WRITEBACKNO (3ULL << TCR_ORGN1_SHIFT)

#define TCR_SH1_SHIFT 28ULL
#define TCR_SH1_NONE (0ULL << TCR_SH1_SHIFT)
#define TCR_SH1_OUTER (2ULL << TCR_SH1_SHIFT)
#define TCR_SH1_INNER (3ULL << TCR_SH1_SHIFT)

#define TCR_TG1_GRANULE_SHIFT 30ULL

#define TCR_TG1_GRANULE_16KB (1ULL << TCR_TG1_GRANULE_SHIFT)
#define TCR_TG1_GRANULE_4KB (2ULL << TCR_TG1_GRANULE_SHIFT)
#define TCR_TG1_GRANULE_64KB (3ULL << TCR_TG1_GRANULE_SHIFT)

#if __ARM_16K_PG__
#define TCR_TG1_GRANULE_SIZE (TCR_TG1_GRANULE_16KB)
#else
#define TCR_TG1_GRANULE_SIZE (TCR_TG1_GRANULE_4KB)
#endif

#define TCR_IPS_SHIFT 32ULL
#define TCR_IPS_BITS 3ULL
#define TCR_IPS_MASK ((1ULL << TCR_IPS_BITS) - 1ULL)
#define TCR_IPS_32BITS (0ULL << TCR_IPS_SHIFT)
#define TCR_IPS_36BITS (1ULL << TCR_IPS_SHIFT)
#define TCR_IPS_40BITS (2ULL << TCR_IPS_SHIFT)
#define TCR_IPS_42BITS (3ULL << TCR_IPS_SHIFT)
#define TCR_IPS_44BITS (4ULL << TCR_IPS_SHIFT)
#define TCR_IPS_48BITS (5ULL << TCR_IPS_SHIFT)

#define TCR_AS_16BIT_ASID (1ULL << 36)
#define TCR_TBI0_TOPBYTE_IGNORED (1ULL << 37)
#define TCR_TBI1_TOPBYTE_IGNORED (1ULL << 38)
#define TCR_TBID0_TBI_DATA_ONLY (1ULL << 51)
#define TCR_TBID1_TBI_DATA_ONLY (1ULL << 52)

#if defined(HAS_APPLE_PAC)
#define TCR_TBID0_ENABLE TCR_TBID0_TBI_DATA_ONLY
#define TCR_TBID1_ENABLE TCR_TBID1_TBI_DATA_ONLY
#else
#define TCR_TBID0_ENABLE 0
#define TCR_TBID1_ENABLE 0
#endif

#define TCR_E0PD0_BIT (1ULL << 55)
#define TCR_E0PD1_BIT (1ULL << 56)

#if defined(HAS_E0PD)
#define TCR_E0PD_VALUE (TCR_E0PD1_BIT)
#else
#define TCR_E0PD_VALUE 0
#endif


/*
 * Multiprocessor Affinity Register (MPIDR_EL1)
 *
 * +63-----------------------------31+30+29-25+24+23-16+15-8+7--0+
 * |000000000000000000000000000000001| U|00000|MT| Aff2|Aff1|Aff0|
 * +---------------------------------+--+-----+--+-----+----+----+
 *
 * where
 *  U:    Uniprocessor
 *  MT:   Multi-threading at lowest affinity level
 *  Aff2: "1" - PCORE, "0" - ECORE
 *  Aff1: Cluster ID
 *  Aff0: CPU ID
 */
#define MPIDR_AFF0_SHIFT 0
#define MPIDR_AFF0_WIDTH 8
#define MPIDR_AFF0_MASK (((1 << MPIDR_AFF0_WIDTH) - 1) << MPIDR_AFF0_SHIFT)
#define MPIDR_AFF1_SHIFT 8
#define MPIDR_AFF1_WIDTH 8
#define MPIDR_AFF1_MASK (((1 << MPIDR_AFF1_WIDTH) - 1) << MPIDR_AFF1_SHIFT)
#define MPIDR_AFF2_SHIFT 16
#define MPIDR_AFF2_WIDTH 8
#define MPIDR_AFF2_MASK (((1 << MPIDR_AFF2_WIDTH) - 1) << MPIDR_AFF2_SHIFT)
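/*
 * Illustrative sketch (an assumption, not code from this header): decoding
 * the affinity fields of an MPIDR_EL1 value with the masks above; 'mpidr' is
 * hypothetical (e.g. read with MRS from MPIDR_EL1).
 *
 *	unsigned cpu_id = (unsigned)((mpidr & MPIDR_AFF0_MASK) >> MPIDR_AFF0_SHIFT);
 *	unsigned cluster_id = (unsigned)((mpidr & MPIDR_AFF1_MASK) >> MPIDR_AFF1_SHIFT);
 *	unsigned core_type = (unsigned)((mpidr & MPIDR_AFF2_MASK) >> MPIDR_AFF2_SHIFT); // 1 = PCORE, 0 = ECORE
 */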
/*
 * TXSZ indicates the size of the range a TTBR covers. Currently,
 * we support the following:
 *
 * 4KB pages, full page L1: 39 bit range.
 * 4KB pages, sub-page L1: 38 bit range.
 * 16KB pages, full page L1: 47 bit range.
 * 16KB pages, sub-page L1: 39 bit range.
 * 16KB pages, two level page tables: 36 bit range.
 */
#if __ARM_KERNEL_PROTECT__
/*
 * If we are configured to use __ARM_KERNEL_PROTECT__, the first half of the
 * address space is used for the mappings that will remain in place when in EL0.
 * As a result, 1 bit less of address space is available to the rest of the
 * kernel.
 */
#endif /* __ARM_KERNEL_PROTECT__ */
#ifdef __ARM_16K_PG__
#if __ARM64_PMAP_SUBPAGE_L1__
#define T0SZ_BOOT 25ULL
#else /* !__ARM64_PMAP_SUBPAGE_L1__ */
#define T0SZ_BOOT 17ULL
#endif /* !__ARM64_PMAP_SUBPAGE_L1__ */
#else /* __ARM_16K_PG__ */
#if __ARM64_PMAP_SUBPAGE_L1__
#define T0SZ_BOOT 26ULL
#else /* __ARM64_PMAP_SUBPAGE_L1__ */
#define T0SZ_BOOT 25ULL
#endif /* __ARM64_PMAP_SUBPAGE_L1__ */
#endif /* __ARM_16K_PG__ */

#if defined(APPLE_ARM64_ARCH_FAMILY)
/* T0SZ must be the same as T1SZ */
#define T1SZ_BOOT T0SZ_BOOT
#else /* defined(APPLE_ARM64_ARCH_FAMILY) */
#ifdef __ARM_16K_PG__
#if __ARM64_PMAP_SUBPAGE_L1__
#define T1SZ_BOOT 25ULL
#else /* !__ARM64_PMAP_SUBPAGE_L1__ */
#define T1SZ_BOOT 17ULL
#endif /* !__ARM64_PMAP_SUBPAGE_L1__ */
#else /* __ARM_16K_PG__ */
#if __ARM64_PMAP_SUBPAGE_L1__
#define T1SZ_BOOT 26ULL
#else /* __ARM64_PMAP_SUBPAGE_L1__ */
#define T1SZ_BOOT 25ULL
#endif /* __ARM64_PMAP_SUBPAGE_L1__ */
#endif /* __ARM_16K_PG__ */
#endif /* defined(APPLE_ARM64_ARCH_FAMILY) */

#if __ARM_42BIT_PA_SPACE__
#define TCR_IPS_VALUE TCR_IPS_42BITS
#else /* !__ARM_42BIT_PA_SPACE__ */
#define TCR_IPS_VALUE TCR_IPS_40BITS
#endif /* !__ARM_42BIT_PA_SPACE__ */

#if CONFIG_KERNEL_TBI
#define TCR_EL1_DTBI (TCR_TBI1_TOPBYTE_IGNORED | TCR_TBID1_ENABLE)
#else /* CONFIG_KERNEL_TBI */
#define TCR_EL1_DTBI 0
#endif /* CONFIG_KERNEL_TBI */

#define TCR_EL1_BASE \
	(TCR_IPS_VALUE | TCR_SH0_OUTER | TCR_ORGN0_WRITEBACK | \
	TCR_IRGN0_WRITEBACK | (T0SZ_BOOT << TCR_T0SZ_SHIFT) | \
	TCR_SH1_OUTER | TCR_ORGN1_WRITEBACK | \
	TCR_IRGN1_WRITEBACK | (TCR_TG1_GRANULE_SIZE) | \
	TCR_TBI0_TOPBYTE_IGNORED | (TCR_TBID0_ENABLE) | TCR_E0PD_VALUE | \
	TCR_EL1_DTBI)

#if __ARM_KERNEL_PROTECT__
#define TCR_EL1_BOOT (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE))
#define T1SZ_USER (T1SZ_BOOT + 1)
#define TCR_EL1_USER (TCR_EL1_BASE | (T1SZ_USER << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE))
#else
#define TCR_EL1_BOOT (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_SIZE))
#endif /* __ARM_KERNEL_PROTECT__ */

#define TCR_EL1_4KB (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_4KB))
#define TCR_EL1_16KB (TCR_EL1_BASE | (T1SZ_BOOT << TCR_T1SZ_SHIFT) | (TCR_TG0_GRANULE_16KB))
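/*
 * Worked example (derived from the TXSZ table above): a TTBR region spans
 * 2^(64 - TxSZ) bytes. With 16KB pages and a full-page L1 (T0SZ_BOOT = 17):
 *
 *	uint64_t va_bits = 64 - T0SZ_BOOT;   // 47-bit range
 *	uint64_t span = 1ULL << va_bits;     // 128TB covered by TTBR0
 */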
/*
 * Monitor Debug System Control Register (MDSCR)
 */

#define MDSCR_TFO_SHIFT 31
#define MDSCR_TFO (1ULL << MDSCR_TFO_SHIFT)
#define MDSCR_RXFULL_SHIFT 30
#define MDSCR_RXFULL (1ULL << MDSCR_RXFULL_SHIFT)
#define MDSCR_TXFULL_SHIFT 29
#define MDSCR_TXFULL (1ULL << MDSCR_TXFULL_SHIFT)
#define MDSCR_RXO_SHIFT 27
#define MDSCR_RXO (1ULL << MDSCR_RXO_SHIFT)
#define MDSCR_TXU_SHIFT 26
#define MDSCR_TXU (1ULL << MDSCR_TXU_SHIFT)
#define MDSCR_INTDIS_SHIFT 22
#define MDSCR_INTDIS_MASK (0x2U << MDSCR_INTDIS_SHIFT)
#define MDSCR_TDA_SHIFT 21
#define MDSCR_TDA (1ULL << MDSCR_TDA_SHIFT)
#define MDSCR_SC2_SHIFT 19
#define MDSCR_SC2 (1ULL << MDSCR_SC2_SHIFT)
#define MDSCR_MDE_SHIFT 15
#define MDSCR_MDE (1ULL << MDSCR_MDE_SHIFT)
#define MDSCR_HDE_SHIFT 14
#define MDSCR_HDE (1ULL << MDSCR_HDE_SHIFT)
#define MDSCR_KDE_SHIFT 13
#define MDSCR_KDE (1ULL << MDSCR_KDE_SHIFT)
#define MDSCR_TDCC_SHIFT 12
#define MDSCR_TDCC (1ULL << MDSCR_TDCC_SHIFT)
#define MDSCR_ERR_SHIFT 6
#define MDSCR_ERR (1ULL << MDSCR_ERR_SHIFT)
#define MDSCR_SS_SHIFT 0
#define MDSCR_SS (1ULL << MDSCR_SS_SHIFT)

/*
 * Translation Table Base Register (TTBR)
 *
 *  63    48 47               x x-1  0
 * +--------+------------------+------+
 * |  ASID  |   Base Address   | zero |
 * +--------+------------------+------+
 */
#define TTBR_ASID_SHIFT 48
#define TTBR_ASID_MASK 0xffff000000000000

#define TTBR_BADDR_MASK 0x0000ffffffffffff
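/*
 * Illustrative sketch (an assumption, not code from this header): packing a
 * root table physical address and an ASID into a TTBR value, and unpacking
 * the ASID again; 'root_pa' and 'asid' are hypothetical.
 *
 *	uint64_t ttbr = (root_pa & TTBR_BADDR_MASK) | ((uint64_t)asid << TTBR_ASID_SHIFT);
 *	uint16_t ttbr_asid = (uint16_t)((ttbr & TTBR_ASID_MASK) >> TTBR_ASID_SHIFT);
 */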
/*
 * Memory Attribute Indirection Register
 *
 *  63   56 55   48 47   40 39   32 31   24 23   16 15    8 7     0
 * +-------+-------+-------+-------+-------+-------+-------+-------+
 * | Attr7 | Attr6 | Attr5 | Attr4 | Attr3 | Attr2 | Attr1 | Attr0 |
 * +-------+-------+-------+-------+-------+-------+-------+-------+
 */

#define MAIR_ATTR_SHIFT(x) (8*(x))

/* Strongly ordered or device memory attributes */
#define MAIR_OUTER_STRONGLY_ORDERED 0x0
#define MAIR_OUTER_DEVICE 0x0

#define MAIR_INNER_STRONGLY_ORDERED 0x0
#define MAIR_INNER_DEVICE 0x4

/* Normal memory attributes */
#define MAIR_OUTER_NON_CACHEABLE 0x40
#define MAIR_OUTER_WRITE_THROUGH 0x80
#define MAIR_OUTER_WRITE_BACK 0xc0

#define MAIR_INNER_NON_CACHEABLE 0x4
#define MAIR_INNER_WRITE_THROUGH 0x8
#define MAIR_INNER_WRITE_BACK 0xc

/* Allocate policy for cacheable memory */
#define MAIR_OUTER_WRITE_ALLOCATE 0x10
#define MAIR_OUTER_READ_ALLOCATE 0x20

#define MAIR_INNER_WRITE_ALLOCATE 0x1
#define MAIR_INNER_READ_ALLOCATE 0x2

/* Memory Attribute Encoding */

/*
 * Device memory types:
 * G (gathering): multiple reads/writes can be combined
 * R (reordering): reads or writes may reach device out of program order
 * E (early-acknowledge): writes may return immediately (e.g. PCIe posted writes)
 */
#define MAIR_DISABLE 0x00 /* Device Memory, nGnRnE (strongly ordered) */
#define MAIR_POSTED 0x04 /* Device Memory, nGnRE (strongly ordered, posted writes) */
#define MAIR_POSTED_REORDERED 0x08 /* Device Memory, nGRE (reorderable, posted writes) */
#define MAIR_POSTED_COMBINED_REORDERED 0x0C /* Device Memory, GRE (reorderable, gathered writes, posted writes) */
#define MAIR_WRITECOMB 0x44 /* Normal Memory, Outer Non-Cacheable, Inner Non-Cacheable */
#define MAIR_WRITETHRU 0xBB /* Normal Memory, Outer Write-through, Inner Write-through */
#define MAIR_WRITEBACK 0xFF /* Normal Memory, Outer Write-back, Inner Write-back */
#define MAIR_INNERWRITEBACK 0x4F /* Normal Memory, Outer Non-Cacheable, Inner Write-back */


/*
 * ARM 4-level page table support - 2 * 256TB (2^48 each, for TTBR0 and TTBR1) of address space
 */


/*
 * Memory Attribute Index. If these values change, please also update the pmap
 * LLDB macros that rely on this value (e.g., PmapDecodeTTEARM64).
 */
#define CACHE_ATTRINDX_WRITEBACK 0x0 /* cache enabled, buffer enabled (normal memory) */
#define CACHE_ATTRINDX_WRITECOMB 0x1 /* no cache, buffered writes (normal memory) */
#define CACHE_ATTRINDX_WRITETHRU 0x2 /* cache enabled, buffer disabled (normal memory) */
#define CACHE_ATTRINDX_DISABLE 0x3 /* no cache, no buffer (device memory) */
#define CACHE_ATTRINDX_INNERWRITEBACK 0x4 /* inner cache enabled, buffer enabled, write allocate (normal memory) */
#define CACHE_ATTRINDX_POSTED 0x5 /* no cache, no buffer, posted writes (device memory) */
#define CACHE_ATTRINDX_POSTED_REORDERED 0x6 /* no cache, reorderable access, posted writes (device memory) */
#define CACHE_ATTRINDX_POSTED_COMBINED_REORDERED 0x7 /* no cache, write gathering, reorderable access, posted writes (device memory) */
#define CACHE_ATTRINDX_DEFAULT CACHE_ATTRINDX_WRITEBACK
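/*
 * Illustrative sketch (an assumption, not code from this header): each
 * CACHE_ATTRINDX_* value is an index into MAIR_EL1, so programming the first
 * four attribute slots with the encodings above would look like:
 *
 *	uint64_t mair =
 *	    ((uint64_t)MAIR_WRITEBACK << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITEBACK)) |
 *	    ((uint64_t)MAIR_WRITECOMB << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITECOMB)) |
 *	    ((uint64_t)MAIR_WRITETHRU << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_WRITETHRU)) |
 *	    ((uint64_t)MAIR_DISABLE << MAIR_ATTR_SHIFT(CACHE_ATTRINDX_DISABLE));
 */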
/*
 * Access protection bit values (TTEs and PTEs), stage 1
 *
 * Bit 1 controls access type (1=RO, 0=RW), bit 0 controls user (1=access, 0=no access)
 */
#define AP_RWNA 0x0 /* priv=read-write, user=no-access */
#define AP_RWRW 0x1 /* priv=read-write, user=read-write */
#define AP_RONA 0x2 /* priv=read-only, user=no-access */
#define AP_RORO 0x3 /* priv=read-only, user=read-only */
#define AP_MASK 0x3 /* mask to find ap bits */

/*
 * Shareability attributes
 */
#define SH_NONE 0x0 /* Non shareable */
#define SH_DEVICE 0x2 /* Device memory (treated as Outer shareable) */
#define SH_OUTER_MEMORY 0x2 /* Normal memory Inner shareable - Outer shareable */
#define SH_INNER_MEMORY 0x3 /* Normal memory Inner shareable - Outer non shareable */


/*
 * ARM Page Granule
 */
#ifdef __ARM_16K_PG__
#define ARM_PGSHIFT 14
#else
#define ARM_PGSHIFT 12
#endif
#define ARM_PGBYTES (1 << ARM_PGSHIFT)
#define ARM_PGMASK (ARM_PGBYTES-1)
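/*
 * Illustrative sketch (an assumption, not code from this header): rounding
 * with the native page granule; 'va' is hypothetical.
 *
 *	uint64_t page_base = va & ~(uint64_t)ARM_PGMASK;                     // truncate to page
 *	uint64_t page_end = (va + ARM_PGBYTES - 1) & ~(uint64_t)ARM_PGMASK;  // round up to page
 */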
/*
 * L0 Translation table
 *
 * 4KB granule size:
 *   Each translation table is 4KB
 *   512 64-bit entries of 512GB (2^39) of address space.
 *   Covers 256TB (2^48) of address space.
 *
 * 16KB granule size:
 *   Each translation table is 16KB
 *   2 64-bit entries of 128TB (2^47) of address space.
 *   Covers 256TB (2^48) of address space.
 */

/* 16K L0 */
#define ARM_16K_TT_L0_SIZE 0x0000800000000000ULL /* size of area covered by a tte */
#define ARM_16K_TT_L0_OFFMASK 0x00007fffffffffffULL /* offset within an L0 entry */
#define ARM_16K_TT_L0_SHIFT 47 /* page descriptor shift */
#define ARM_16K_TT_L0_INDEX_MASK 0x0000800000000000ULL /* mask for getting index in L0 table from virtual address */

/* 4K L0 */
#define ARM_4K_TT_L0_SIZE 0x0000008000000000ULL /* size of area covered by a tte */
#define ARM_4K_TT_L0_OFFMASK 0x0000007fffffffffULL /* offset within an L0 entry */
#define ARM_4K_TT_L0_SHIFT 39 /* page descriptor shift */
#define ARM_4K_TT_L0_INDEX_MASK 0x0000ff8000000000ULL /* mask for getting index in L0 table from virtual address */

/*
 * L1 Translation table
 *
 * 4KB granule size:
 *   Each translation table is 4KB
 *   512 64-bit entries of 1GB (2^30) of address space.
 *   Covers 512GB (2^39) of address space.
 *
 * 16KB granule size:
 *   Each translation table is 16KB
 *   2048 64-bit entries of 64GB (2^36) of address space.
 *   Covers 128TB (2^47) of address space.
 */

/* 16K L1 */
#define ARM_16K_TT_L1_SIZE 0x0000001000000000ULL /* size of area covered by a tte */
#define ARM_16K_TT_L1_OFFMASK 0x0000000fffffffffULL /* offset within an L1 entry */
#define ARM_16K_TT_L1_SHIFT 36 /* page descriptor shift */
#if __ARM64_PMAP_SUBPAGE_L1__ && __ARM_16K_PG__
/* This config supports 512GB per TTBR. */
#define ARM_16K_TT_L1_INDEX_MASK 0x0000007000000000ULL /* mask for getting index into L1 table from virtual address */
#else /* __ARM64_PMAP_SUBPAGE_L1__ */
#define ARM_16K_TT_L1_INDEX_MASK 0x00007ff000000000ULL /* mask for getting index into L1 table from virtual address */
#endif /* __ARM64_PMAP_SUBPAGE_L1__ */

/* 4K L1 */
#define ARM_4K_TT_L1_SIZE 0x0000000040000000ULL /* size of area covered by a tte */
#define ARM_4K_TT_L1_OFFMASK 0x000000003fffffffULL /* offset within an L1 entry */
#define ARM_4K_TT_L1_SHIFT 30 /* page descriptor shift */
#if __ARM64_PMAP_SUBPAGE_L1__ && !__ARM_16K_PG__
/* This config supports 256GB per TTBR. */
#define ARM_4K_TT_L1_INDEX_MASK 0x0000003fc0000000ULL /* mask for getting index into L1 table from virtual address */
#else /* __ARM64_PMAP_SUBPAGE_L1__ */
#define ARM_4K_TT_L1_INDEX_MASK 0x0000007fc0000000ULL /* mask for getting index into L1 table from virtual address */
#endif /* __ARM64_PMAP_SUBPAGE_L1__ */

/* some sugar for getting pointers to page tables and entries */

#define L1_TABLE_INDEX(va) (((va) & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT)
#define L2_TABLE_INDEX(va) (((va) & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT)
#define L3_TABLE_INDEX(va) (((va) & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT)

#define L2_TABLE_VA(tte) ((tt_entry_t*) phystokv((*(tte)) & ARM_TTE_TABLE_MASK))
#define L3_TABLE_VA(tte2) ((pt_entry_t*) phystokv((*(tte2)) & ARM_TTE_TABLE_MASK))
/*
 * L2 Translation table
 *
 * 4KB granule size:
 *   Each translation table is 4KB
 *   512 64-bit entries of 2MB (2^21) of address space.
 *   Covers 1GB (2^30) of address space.
 *
 * 16KB granule size:
 *   Each translation table is 16KB
 *   2048 64-bit entries of 32MB (2^25) of address space.
 *   Covers 64GB (2^36) of address space.
 */

/* 16K L2 */
#define ARM_16K_TT_L2_SIZE 0x0000000002000000ULL /* size of area covered by a tte */
#define ARM_16K_TT_L2_OFFMASK 0x0000000001ffffffULL /* offset within an L2 entry */
#define ARM_16K_TT_L2_SHIFT 25 /* page descriptor shift */
#define ARM_16K_TT_L2_INDEX_MASK 0x0000000ffe000000ULL /* mask for getting index in L2 table from virtual address */

/* 4K L2 */
#define ARM_4K_TT_L2_SIZE 0x0000000000200000ULL /* size of area covered by a tte */
#define ARM_4K_TT_L2_OFFMASK 0x00000000001fffffULL /* offset within an L2 entry */
#define ARM_4K_TT_L2_SHIFT 21 /* page descriptor shift */
#define ARM_4K_TT_L2_INDEX_MASK 0x000000003fe00000ULL /* mask for getting index in L2 table from virtual address */

/*
 * L3 Translation table
 *
 * 4KB granule size:
 *   Each translation table is 4KB
 *   512 64-bit entries of 4KB (2^12) of address space.
 *   Covers 2MB (2^21) of address space.
 *
 * 16KB granule size:
 *   Each translation table is 16KB
 *   2048 64-bit entries of 16KB (2^14) of address space.
 *   Covers 32MB (2^25) of address space.
 */

/* 16K L3 */
#define ARM_16K_TT_L3_SIZE 0x0000000000004000ULL /* size of area covered by a tte */
#define ARM_16K_TT_L3_OFFMASK 0x0000000000003fffULL /* offset within L3 PTE */
#define ARM_16K_TT_L3_SHIFT 14 /* page descriptor shift */
#define ARM_16K_TT_L3_INDEX_MASK 0x0000000001ffc000ULL /* mask for page descriptor index */

/* 4K L3 */
#define ARM_4K_TT_L3_SIZE 0x0000000000001000ULL /* size of area covered by a tte */
#define ARM_4K_TT_L3_OFFMASK 0x0000000000000fffULL /* offset within L3 PTE */
#define ARM_4K_TT_L3_SHIFT 12 /* page descriptor shift */
#define ARM_4K_TT_L3_INDEX_MASK 0x00000000001ff000ULL /* mask for page descriptor index */

#ifdef __ARM_16K_PG__

/* Native L0 defines */
#define ARM_TT_L0_SIZE ARM_16K_TT_L0_SIZE
#define ARM_TT_L0_OFFMASK ARM_16K_TT_L0_OFFMASK
#define ARM_TT_L0_SHIFT ARM_16K_TT_L0_SHIFT
#define ARM_TT_L0_INDEX_MASK ARM_16K_TT_L0_INDEX_MASK

/* Native L1 defines */
#define ARM_TT_L1_SIZE ARM_16K_TT_L1_SIZE
#define ARM_TT_L1_OFFMASK ARM_16K_TT_L1_OFFMASK
#define ARM_TT_L1_SHIFT ARM_16K_TT_L1_SHIFT
#define ARM_TT_L1_INDEX_MASK ARM_16K_TT_L1_INDEX_MASK

/* Native L2 defines */
#define ARM_TT_L2_SIZE ARM_16K_TT_L2_SIZE
#define ARM_TT_L2_OFFMASK ARM_16K_TT_L2_OFFMASK
#define ARM_TT_L2_SHIFT ARM_16K_TT_L2_SHIFT
#define ARM_TT_L2_INDEX_MASK ARM_16K_TT_L2_INDEX_MASK

/* Native L3 defines */
#define ARM_TT_L3_SIZE ARM_16K_TT_L3_SIZE
#define ARM_TT_L3_OFFMASK ARM_16K_TT_L3_OFFMASK
#define ARM_TT_L3_SHIFT ARM_16K_TT_L3_SHIFT
#define ARM_TT_L3_INDEX_MASK ARM_16K_TT_L3_INDEX_MASK

#else /* !__ARM_16K_PG__ */

/* Native L0 defines */
#define ARM_TT_L0_SIZE ARM_4K_TT_L0_SIZE
#define ARM_TT_L0_OFFMASK ARM_4K_TT_L0_OFFMASK
#define ARM_TT_L0_SHIFT ARM_4K_TT_L0_SHIFT
#define ARM_TT_L0_INDEX_MASK ARM_4K_TT_L0_INDEX_MASK

/* Native L1 defines */
#define ARM_TT_L1_SIZE ARM_4K_TT_L1_SIZE
#define ARM_TT_L1_OFFMASK ARM_4K_TT_L1_OFFMASK
#define ARM_TT_L1_SHIFT ARM_4K_TT_L1_SHIFT
#define ARM_TT_L1_INDEX_MASK ARM_4K_TT_L1_INDEX_MASK

/* Native L2 defines */
#define ARM_TT_L2_SIZE ARM_4K_TT_L2_SIZE
#define ARM_TT_L2_OFFMASK ARM_4K_TT_L2_OFFMASK
#define ARM_TT_L2_SHIFT ARM_4K_TT_L2_SHIFT
#define ARM_TT_L2_INDEX_MASK ARM_4K_TT_L2_INDEX_MASK

/* Native L3 defines */
#define ARM_TT_L3_SIZE ARM_4K_TT_L3_SIZE
#define ARM_TT_L3_OFFMASK ARM_4K_TT_L3_OFFMASK
#define ARM_TT_L3_SHIFT ARM_4K_TT_L3_SHIFT
#define ARM_TT_L3_INDEX_MASK ARM_4K_TT_L3_INDEX_MASK

#endif /* !__ARM_16K_PG__ */

/*
 * Convenience definitions for:
 *   ARM_TT_LEAF: The last level of the configured page table format.
 *   ARM_TT_TWIG: The second to last level of the configured page table format.
 *   ARM_TT_ROOT: The first level of the configured page table format.
 *
 * My apologies to any botanists who may be reading this.
 */
#define ARM_TT_LEAF_SIZE ARM_TT_L3_SIZE
#define ARM_TT_LEAF_OFFMASK ARM_TT_L3_OFFMASK
#define ARM_TT_LEAF_SHIFT ARM_TT_L3_SHIFT
#define ARM_TT_LEAF_INDEX_MASK ARM_TT_L3_INDEX_MASK

#define ARM_TT_TWIG_SIZE ARM_TT_L2_SIZE
#define ARM_TT_TWIG_OFFMASK ARM_TT_L2_OFFMASK
#define ARM_TT_TWIG_SHIFT ARM_TT_L2_SHIFT
#define ARM_TT_TWIG_INDEX_MASK ARM_TT_L2_INDEX_MASK

#define ARM_TT_ROOT_SIZE ARM_TT_L1_SIZE
#define ARM_TT_ROOT_OFFMASK ARM_TT_L1_OFFMASK
#define ARM_TT_ROOT_SHIFT ARM_TT_L1_SHIFT
#define ARM_TT_ROOT_INDEX_MASK ARM_TT_L1_INDEX_MASK
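/*
 * Illustrative sketch (a hypothetical pmap-style walk, not kernel code):
 * descending from an L1 table to the L3 PTE for a virtual address using the
 * index/table sugar defined earlier; 'root_table' is a hypothetical
 * tt_entry_t pointer to the root (L1) table.
 *
 *	tt_entry_t *l1_ttep = &root_table[L1_TABLE_INDEX(va)];
 *	tt_entry_t *l2_ttep = &L2_TABLE_VA(l1_ttep)[L2_TABLE_INDEX(va)];
 *	pt_entry_t *ptep = &L3_TABLE_VA(l2_ttep)[L3_TABLE_INDEX(va)];
 */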
/*
 * 4KB granule size:
 *
 * Level 0 Translation Table Entry
 *
 *  63 62 61 60 59 58   52 51  48 47                  12 11    2 1 0
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 * |NS|  AP |XN|PXN|ignored| zero | L1TableOutputAddress |ignored|1|V|
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 *
 * Level 1 Translation Table Entry
 *
 *  63 62 61 60 59 58   52 51  48 47                  12 11    2 1 0
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 * |NS|  AP |XN|PXN|ignored| zero | L2TableOutputAddress |ignored|1|V|
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 *
 * Level 1 Translation Block Entry
 *
 *  63 59 58  55 54 53 52 51  48 47                  30 29  12 11 10 9 8 7 6 5 4     2 1 0
 * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+
 * | ign |sw use|XN|PXN|HINT| zero | OutputAddress[47:30] | zero |nG|AF| SH | AP |NS|AttrIdx|0|V|
 * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+
 *
 * Level 2 Translation Table Entry
 *
 *  63 62 61 60 59 58   52 51  48 47                  12 11    2 1 0
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 * |NS|  AP |XN|PXN|ignored| zero | L3TableOutputAddress |ignored|1|V|
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 *
 * Level 2 Translation Block Entry
 *
 *  63 59 58  55 54 53 52 51  48 47                  21 20  12 11 10 9 8 7 6 5 4     2 1 0
 * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+
 * | ign |sw use|XN|PXN|HINT| zero | OutputAddress[47:21] | zero |nG|AF| SH | AP |NS|AttrIdx|0|V|
 * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+
 *
 * 16KB granule size:
 *
 * Level 0 Translation Table Entry
 *
 *  63 62 61 60 59 58   52 51  48 47                  14 13    2 1 0
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 * |NS|  AP |XN|PXN|ignored| zero | L1TableOutputAddress |ignored|1|V|
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 *
 * Level 1 Translation Table Entry
 *
 *  63 62 61 60 59 58   52 51  48 47                  14 13    2 1 0
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 * |NS|  AP |XN|PXN|ignored| zero | L2TableOutputAddress |ignored|1|V|
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 *
 * Level 2 Translation Table Entry
 *
 *  63 62 61 60 59 58   52 51  48 47                  14 13    2 1 0
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 * |NS|  AP |XN|PXN|ignored| zero | L3TableOutputAddress |ignored|1|V|
 * +--+-----+--+---+-------+------+----------------------+-------+-+-+
 *
 * Level 2 Translation Block Entry
 *
 *  63 59 58  55 54 53 52 51  48 47                  25 24  12 11 10 9 8 7 6 5 4     2 1 0
 * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+
 * | ign |sw use|XN|PXN|HINT| zero | OutputAddress[47:25] | zero |nG|AF| SH | AP |NS|AttrIdx|0|V|
 * +-----+------+--+---+----+------+----------------------+------+--+--+----+----+--+-------+-+-+
 *
 * where:
 *   nG:      notGlobal bit
 *   SH:      Shareability field
 *   AP:      access protection
 *   XN:      eXecute Never bit
 *   PXN:     Privilege eXecute Never bit
 *   NS:      Non-Secure bit
 *   HINT:    16 entry contiguous output hint
 *   AttrIdx: Memory Attribute Index
 */

#define TTE_SHIFT 3 /* shift width of a tte (sizeof(tte) == (1 << TTE_SHIFT)) */
#ifdef __ARM_16K_PG__
#define TTE_PGENTRIES (16384 >> TTE_SHIFT) /* number of ttes per page */
#else
#define TTE_PGENTRIES (4096 >> TTE_SHIFT) /* number of ttes per page */
#endif

#define ARM_TTE_MAX (TTE_PGENTRIES)

#define ARM_TTE_EMPTY 0x0000000000000000ULL /* unassigned - invalid entry */
#define ARM_TTE_TYPE_FAULT 0x0000000000000000ULL /* unassigned - invalid entry */

#define ARM_TTE_VALID 0x0000000000000001ULL /* valid entry */

#define ARM_TTE_TYPE_MASK 0x0000000000000002ULL /* mask for extracting the type */
#define ARM_TTE_TYPE_TABLE 0x0000000000000002ULL /* page table type */
#define ARM_TTE_TYPE_BLOCK 0x0000000000000000ULL /* block entry type */
#define ARM_TTE_TYPE_L3BLOCK 0x0000000000000002ULL

#ifdef __ARM_16K_PG__
/*
 * Note that L0/L1 block entries are disallowed for the 16KB granule size; what
 * are we doing with these?
 */
#define ARM_TTE_BLOCK_SHIFT 12 /* entry shift for a 16KB L3 TTE entry */
#define ARM_TTE_BLOCK_L0_SHIFT ARM_TT_L0_SHIFT /* block shift for 128TB section */
#define ARM_TTE_BLOCK_L1_MASK 0x0000fff000000000ULL /* mask to extract phys address from L1 block entry */
#define ARM_TTE_BLOCK_L1_SHIFT ARM_TT_L1_SHIFT /* block shift for 64GB section */
#define ARM_TTE_BLOCK_L2_MASK 0x0000fffffe000000ULL /* mask to extract phys address from Level 2 Translation Block entry */
#define ARM_TTE_BLOCK_L2_SHIFT ARM_TT_L2_SHIFT /* block shift for 32MB section */
#else
#define ARM_TTE_BLOCK_SHIFT 12 /* entry shift for a 4KB L3 TTE entry */
#define ARM_TTE_BLOCK_L0_SHIFT ARM_TT_L0_SHIFT /* block shift for 2048GB section */
#define ARM_TTE_BLOCK_L1_MASK 0x0000ffffc0000000ULL /* mask to extract phys address from L1 block entry */
#define ARM_TTE_BLOCK_L1_SHIFT ARM_TT_L1_SHIFT /* block shift for 1GB section */
#define ARM_TTE_BLOCK_L2_MASK 0x0000ffffffe00000ULL /* mask to extract phys address from Level 2 Translation Block entry */
#define ARM_TTE_BLOCK_L2_SHIFT ARM_TT_L2_SHIFT /* block shift for 2MB section */
#endif

#define ARM_TTE_BLOCK_APSHIFT 6
#define ARM_TTE_BLOCK_AP(x) ((x)<<ARM_TTE_BLOCK_APSHIFT) /* access protection */
#define ARM_TTE_BLOCK_APMASK (0x3 << ARM_TTE_BLOCK_APSHIFT)

#define ARM_TTE_BLOCK_ATTRINDX(x) ((x) << 2) /* memory attributes index */
#define ARM_TTE_BLOCK_ATTRINDXMASK (0x7ULL << 2) /* mask memory attributes index */

#define ARM_TTE_BLOCK_SH(x) ((x) << 8) /* access shared */
#define ARM_TTE_BLOCK_SHMASK (0x3ULL << 8) /* mask access shared */

#define ARM_TTE_BLOCK_AF 0x0000000000000400ULL /* value for access */
#define ARM_TTE_BLOCK_AFMASK 0x0000000000000400ULL /* access mask */

#define ARM_TTE_BLOCK_NG 0x0000000000000800ULL /* value for a not-global mapping */
#define ARM_TTE_BLOCK_NG_MASK 0x0000000000000800ULL /* notGlobal mapping mask */

#define ARM_TTE_BLOCK_NS 0x0000000000000020ULL /* value for a non-secure mapping */
#define ARM_TTE_BLOCK_NS_MASK 0x0000000000000020ULL /* notSecure mapping mask */

#define ARM_TTE_BLOCK_PNX 0x0020000000000000ULL /* value for privilege no execute bit */
#define ARM_TTE_BLOCK_PNXMASK 0x0020000000000000ULL /* privilege no execute mask */

#define ARM_TTE_BLOCK_NX 0x0040000000000000ULL /* value for no execute */
#define ARM_TTE_BLOCK_NXMASK 0x0040000000000000ULL /* no execute mask */

#define ARM_TTE_BLOCK_WIRED 0x0400000000000000ULL /* value for software wired bit */
#define ARM_TTE_BLOCK_WIREDMASK 0x0400000000000000ULL /* software wired mask */

#define ARM_TTE_BLOCK_WRITEABLE 0x0800000000000000ULL /* value for software writeable bit */
#define ARM_TTE_BLOCK_WRITEABLEMASK 0x0800000000000000ULL /* software writeable mask */

#define ARM_TTE_TABLE_MASK 0x0000fffffffff000ULL /* mask for extracting pointer to next table (works at any level) */

#define ARM_TTE_TABLE_APSHIFT 61
#define ARM_TTE_TABLE_AP_NO_EFFECT 0x0ULL
#define ARM_TTE_TABLE_AP_USER_NA 0x1ULL
#define ARM_TTE_TABLE_AP_RO 0x2ULL
#define ARM_TTE_TABLE_AP_KERN_RO 0x3ULL
#define ARM_TTE_TABLE_AP(x) ((x) << ARM_TTE_TABLE_APSHIFT) /* access protection */

#define ARM_TTE_TABLE_NS 0x8000000000000020ULL /* value for a non-secure mapping */
#define ARM_TTE_TABLE_NS_MASK 0x8000000000000020ULL /* notSecure mapping mask */
#define ARM_TTE_TABLE_XN         0x1000000000000000ULL  /* value for no execute */
#define ARM_TTE_TABLE_XNMASK     0x1000000000000000ULL  /* no execute mask */

#define ARM_TTE_TABLE_PXN        0x0800000000000000ULL  /* value for privileged no-execute (PXN) bit */
#define ARM_TTE_TABLE_PXNMASK    0x0800000000000000ULL  /* privileged no-execute mask */

#if __ARM_KERNEL_PROTECT__
#define ARM_TTE_BOOT_BLOCK \
	(ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID | ARM_TTE_BLOCK_SH(SH_OUTER_MEMORY) | \
	 ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) | ARM_TTE_BLOCK_AF | ARM_TTE_BLOCK_NG)
#else /* __ARM_KERNEL_PROTECT__ */
#define ARM_TTE_BOOT_BLOCK \
	(ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID | ARM_TTE_BLOCK_SH(SH_OUTER_MEMORY) | \
	 ARM_TTE_BLOCK_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) | ARM_TTE_BLOCK_AF)
#endif /* __ARM_KERNEL_PROTECT__ */

#define ARM_TTE_BOOT_TABLE       (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)

/*
 * L3 Translation table
 *
 * 4KB granule size:
 *   Each translation table is 4KB.
 *   512 64-bit entries, each mapping 4KB (2^12) of address space.
 *   Covers 2MB (2^21) of address space.
 *
 * 16KB granule size:
 *   Each translation table is 16KB.
 *   2048 64-bit entries, each mapping 16KB (2^14) of address space.
 *   Covers 32MB (2^25) of address space.
 */

#ifdef __ARM_16K_PG__
#define ARM_PTE_SIZE             0x0000000000004000ULL  /* size of area covered by a pte */
#define ARM_PTE_OFFMASK          0x0000000000003fffULL  /* offset within pte area */
#define ARM_PTE_SHIFT            14                     /* page descriptor shift */
#define ARM_PTE_MASK             0x0000ffffffffc000ULL  /* mask for output address in PTE */
#else
#define ARM_PTE_SIZE             0x0000000000001000ULL  /* size of area covered by a pte */
#define ARM_PTE_OFFMASK          0x0000000000000fffULL  /* offset within pte area */
#define ARM_PTE_SHIFT            12                     /* page descriptor shift */
#define ARM_PTE_MASK             0x0000fffffffff000ULL  /* mask for output address in PTE */
#endif

#define ARM_TTE_PA_MASK          0x0000fffffffff000ULL

/*
 * L3 Page table entries
 *
 * The following page table entry types are possible:
 *
 * fault page entry
 *  63                            2  0
 * +------------------------------+--+
 * |    ignored                   |00|
 * +------------------------------+--+
 *
 * page entry
 *  63 59 58  55 54 53   52 51  48 47                  12 11 10  9  8 7  6  5  4     2 1 0
 * +-----+------+--+---+----+------+----------------------+--+--+----+----+--+-------+-+-+
 * | ign |sw use|XN|PXN|HINT| zero | OutputAddress[47:12] |nG|AF| SH | AP |NS|AttrIdx|1|V|
 * +-----+------+--+---+----+------+----------------------+--+--+----+----+--+-------+-+-+
 *
 * where:
 *   nG:      notGlobal bit
 *   SH:      Shareability field
 *   AP:      access protection
 *   XN:      eXecute Never bit
 *   PXN:     Privilege eXecute Never bit
 *   NS:      Non-Secure bit
 *   HINT:    contiguous-output hint (covers 16 entries with 4KB pages, 128 with 16KB)
 *   AttrIdx: Memory Attribute Index
 */

#define PTE_SHIFT                3                      /* shift width of a pte (sizeof(pte) == (1 << PTE_SHIFT)) */
#ifdef __ARM_16K_PG__
#define PTE_PGENTRIES            (16384 >> PTE_SHIFT)   /* number of ptes per page */
#else
#define PTE_PGENTRIES            (4096 >> PTE_SHIFT)    /* number of ptes per page */
#endif
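
/*
 * Example: a minimal sketch (hypothetical helper, not part of this header)
 * showing how ARM_PTE_SHIFT and PTE_PGENTRIES combine to index an L3
 * translation table: strip the page-offset bits, then keep the low-order
 * bits that select one of the PTE_PGENTRIES slots.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>

/* Index of the L3 PTE that maps `va' within its L3 table page. */
static unsigned int
l3_pte_index(uint64_t va)
{
	return (unsigned int)((va >> ARM_PTE_SHIFT) & (PTE_PGENTRIES - 1));
}
#endif
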
#define ARM_PTE_EMPTY            0x0000000000000000ULL  /* unassigned - invalid entry */

/* markers for an (invalid) PTE for a page sent to the compressor */
#define ARM_PTE_COMPRESSED       0x8000000000000000ULL  /* compressed... */
#define ARM_PTE_COMPRESSED_ALT   0x4000000000000000ULL  /* ... and was "alt_acct" */
#define ARM_PTE_COMPRESSED_MASK  0xC000000000000000ULL

#define ARM_PTE_TYPE             0x0000000000000003ULL  /* valid L3 entry: includes bit #1 (counterintuitively) */
#define ARM_PTE_TYPE_VALID       0x0000000000000003ULL  /* valid L3 entry: includes bit #1 (counterintuitively) */
#define ARM_PTE_TYPE_FAULT       0x0000000000000000ULL  /* invalid L3 entry */
#define ARM_PTE_TYPE_MASK        0x0000000000000002ULL  /* mask to get pte type */

/* This mask works for both 16K and 4K pages because bits 12-13 will be zero in 16K pages */
#define ARM_PTE_PAGE_MASK        0x0000FFFFFFFFF000ULL  /* output address mask for page */
#define ARM_PTE_PAGE_SHIFT       12                     /* page shift for the output address in the entry */

#define ARM_PTE_AP(x)            ((x) << 6)             /* access protections */
#define ARM_PTE_APMASK           (0x3ULL << 6)          /* mask access protections */
#define ARM_PTE_EXTRACT_AP(x)    (((x) >> 6) & 0x3ULL)  /* extract access protections from PTE */

#define ARM_PTE_ATTRINDX(x)      ((x) << 2)             /* memory attributes index */
#define ARM_PTE_ATTRINDXMASK     (0x7ULL << 2)          /* mask memory attributes index */

#define ARM_PTE_SH(x)            ((x) << 8)             /* shareability */
#define ARM_PTE_SHMASK           (0x3ULL << 8)          /* mask shareability */

#define ARM_PTE_AF               0x0000000000000400ULL  /* value for access flag */
#define ARM_PTE_AFMASK           0x0000000000000400ULL  /* access flag mask */

#define ARM_PTE_NG               0x0000000000000800ULL  /* value for a not-global (nG) mapping */
#define ARM_PTE_NG_MASK          0x0000000000000800ULL  /* notGlobal mapping mask */

#define ARM_PTE_NS               0x0000000000000020ULL  /* value for a non-secure (NS) mapping */
#define ARM_PTE_NS_MASK          0x0000000000000020ULL  /* notSecure mapping mask */

#define ARM_PTE_HINT             0x0010000000000000ULL  /* value for contiguous entries hint */
#define ARM_PTE_HINT_MASK        0x0010000000000000ULL  /* mask for contiguous entries hint */

#if __ARM_16K_PG__
#define ARM_PTE_HINT_ENTRIES         128ULL             /* number of entries the hint covers */
#define ARM_PTE_HINT_ENTRIES_SHIFT   7ULL               /* shift to construct the number of entries */
#define ARM_PTE_HINT_ADDR_MASK       0x0000FFFFFFE00000ULL /* mask to extract the starting hint address */
#define ARM_PTE_HINT_ADDR_SHIFT      21                 /* shift for the hint address */
#define ARM_KVA_HINT_ADDR_MASK       0xFFFFFFFFFFE00000ULL /* mask to extract the starting hint address */
#else
#define ARM_PTE_HINT_ENTRIES         16ULL              /* number of entries the hint covers */
#define ARM_PTE_HINT_ENTRIES_SHIFT   4ULL               /* shift to construct the number of entries */
#define ARM_PTE_HINT_ADDR_MASK       0x0000FFFFFFFF0000ULL /* mask to extract the starting hint address */
#define ARM_PTE_HINT_ADDR_SHIFT      16                 /* shift for the hint address */
#define ARM_KVA_HINT_ADDR_MASK       0xFFFFFFFFFFFF0000ULL /* mask to extract the starting hint address */
#endif

#define ARM_PTE_PNX              0x0020000000000000ULL  /* value for privileged no-execute (PXN) bit */
#define ARM_PTE_PNXMASK          0x0020000000000000ULL  /* privileged no-execute mask */

#define ARM_PTE_NX               0x0040000000000000ULL  /* value for no execute bit */
#define ARM_PTE_NXMASK           0x0040000000000000ULL  /* no execute mask */

#define ARM_PTE_XMASK            (ARM_PTE_PNXMASK | ARM_PTE_NXMASK)

#define ARM_PTE_WIRED            0x0400000000000000ULL  /* value for software wired bit */
#define ARM_PTE_WIRED_MASK       0x0400000000000000ULL  /* software wired mask */

#define ARM_PTE_WRITEABLE        0x0800000000000000ULL  /* value for software writeable bit */
#define ARM_PTE_WRITEABLE_MASK   0x0800000000000000ULL  /* software writeable mask */
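
/*
 * Example: a hedged sketch of classifying a raw L3 PTE with the markers and
 * masks above. `classify_pte' and its locals are hypothetical names; the
 * logic follows the definitions directly (a compressor marker is only
 * meaningful in an otherwise-invalid entry).
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>
#include <stdint.h>

static void
classify_pte(uint64_t pte)
{
	if ((pte & ARM_PTE_TYPE_MASK) == ARM_PTE_TYPE_FAULT) {
		/* Invalid to hardware; may still carry a compressor marker. */
		bool compressed = (pte & ARM_PTE_COMPRESSED) != 0;
		bool alt_acct   = compressed && (pte & ARM_PTE_COMPRESSED_ALT) != 0;
		(void)compressed; (void)alt_acct;
		return;
	}
	uint64_t pa = pte & ARM_PTE_PAGE_MASK;     /* output address */
	uint64_t ap = ARM_PTE_EXTRACT_AP(pte);     /* AP permission bits */
	bool     xn = (pte & ARM_PTE_XMASK) != 0;  /* any execute-never bit set */
	(void)pa; (void)ap; (void)xn;
}
#endif
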
#define ARM_PTE_BOOT_PAGE_BASE \
	(ARM_PTE_TYPE_VALID | ARM_PTE_SH(SH_OUTER_MEMORY) | \
	 ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) | ARM_PTE_AF)

#if __ARM_KERNEL_PROTECT__
#define ARM_PTE_BOOT_PAGE        (ARM_PTE_BOOT_PAGE_BASE | ARM_PTE_NG)
#else /* __ARM_KERNEL_PROTECT__ */
#define ARM_PTE_BOOT_PAGE        (ARM_PTE_BOOT_PAGE_BASE)
#endif /* __ARM_KERNEL_PROTECT__ */

/*
 * TLBI appears to only deal in 4KB page addresses, so give
 * it an explicit shift of 12.
 */
#define TLBI_ADDR_SHIFT          (0)
#define TLBI_ADDR_SIZE           (44)
#define TLBI_ADDR_MASK           ((1ULL << TLBI_ADDR_SIZE) - 1)
#define TLBI_ASID_SHIFT          (48)
#define TLBI_ASID_SIZE           (16)
#define TLBI_ASID_MASK           (((1ULL << TLBI_ASID_SIZE) - 1))

#define RTLBI_ADDR_SIZE          (37)
#define RTLBI_ADDR_MASK          ((1ULL << RTLBI_ADDR_SIZE) - 1)
#define RTLBI_ADDR_SHIFT         ARM_TT_L3_SHIFT
#define RTLBI_TG(_page_shift_)   ((uint64_t)((((_page_shift_) - 12) >> 1) + 1) << 46)
#define RTLBI_SCALE_SHIFT        (44)
#define RTLBI_NUM_SHIFT          (39)
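
/*
 * Example: a hedged sketch of packing a TLBI VAE1IS-style operand from a VA
 * and an ASID with the fields above. `tlbi_operand' is a hypothetical helper;
 * the VA is pre-shifted by 12 bits, as the comment above describes.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>

static uint64_t
tlbi_operand(uint64_t va, uint16_t asid)
{
	uint64_t op = (va >> 12) & TLBI_ADDR_MASK;  /* page number into the address field */
	op |= ((uint64_t)asid & TLBI_ASID_MASK) << TLBI_ASID_SHIFT; /* ASID into the top bits */
	return op;
	/* e.g. __asm__ volatile ("tlbi vae1is, %0" :: "r"(op)); */
}
#endif
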
/*
 * Exception Syndrome Register
 *
 *  31  26 25 24               0
 * +------+--+------------------+
 * |  EC  |IL|       ISS        |
 * +------+--+------------------+
 *
 * EC  - Exception Class
 * IL  - Instruction Length
 * ISS - Instruction Specific Syndrome
 *
 * Note: The ISS can have many forms. These are defined separately below.
 */

#define ESR_EC_SHIFT             26
#define ESR_EC_MASK              (0x3FULL << ESR_EC_SHIFT)
#define ESR_EC(x)                ((x & ESR_EC_MASK) >> ESR_EC_SHIFT)

#define ESR_IL_SHIFT             25
#define ESR_IL                   (1 << ESR_IL_SHIFT)

#define ESR_INSTR_IS_2BYTES(x)   (!(x & ESR_IL))

#define ESR_ISS_MASK             0x01FFFFFF
#define ESR_ISS(x)               (x & ESR_ISS_MASK)

#ifdef __ASSEMBLER__
/* Define only the classes we need to test in the exception vectors. */
#define ESR_EC_IABORT_EL1        0x21
#define ESR_EC_DABORT_EL1        0x25
#define ESR_EC_SP_ALIGN          0x26
#else
typedef enum {
	ESR_EC_UNCATEGORIZED       = 0x00,
	ESR_EC_WFI_WFE             = 0x01,
	ESR_EC_MCR_MRC_CP15_TRAP   = 0x03,
	ESR_EC_MCRR_MRRC_CP15_TRAP = 0x04,
	ESR_EC_MCR_MRC_CP14_TRAP   = 0x05,
	ESR_EC_LDC_STC_CP14_TRAP   = 0x06,
	ESR_EC_TRAP_SIMD_FP        = 0x07,
	ESR_EC_PTRAUTH_INSTR_TRAP  = 0x09,
	ESR_EC_MCRR_MRRC_CP14_TRAP = 0x0c,
	ESR_EC_ILLEGAL_INSTR_SET   = 0x0e,
	ESR_EC_SVC_32              = 0x11,
	ESR_EC_HVC_32              = 0x12,
	ESR_EC_SVC_64              = 0x15,
	ESR_EC_HVC_64              = 0x16,
	ESR_EC_MSR_TRAP            = 0x18,
#if __has_feature(ptrauth_calls)
	ESR_EC_PAC_FAIL            = 0x1C,
#endif /* __has_feature(ptrauth_calls) */
	ESR_EC_IABORT_EL0          = 0x20,
	ESR_EC_IABORT_EL1          = 0x21,
	ESR_EC_PC_ALIGN            = 0x22,
	ESR_EC_DABORT_EL0          = 0x24,
	ESR_EC_DABORT_EL1          = 0x25,
	ESR_EC_SP_ALIGN            = 0x26,
	ESR_EC_FLOATING_POINT_32   = 0x28,
	ESR_EC_FLOATING_POINT_64   = 0x2C,
	ESR_EC_SERROR_INTERRUPT    = 0x2F,
	ESR_EC_BKPT_REG_MATCH_EL0  = 0x30, // Breakpoint Debug event taken to the EL from a lower EL.
	ESR_EC_BKPT_REG_MATCH_EL1  = 0x31, // Breakpoint Debug event taken to the EL from the EL.
	ESR_EC_SW_STEP_DEBUG_EL0   = 0x32, // Software Step Debug event taken to the EL from a lower EL.
	ESR_EC_SW_STEP_DEBUG_EL1   = 0x33, // Software Step Debug event taken to the EL from the EL.
	ESR_EC_WATCHPT_MATCH_EL0   = 0x34, // Watchpoint Debug event taken to the EL from a lower EL.
	ESR_EC_WATCHPT_MATCH_EL1   = 0x35, // Watchpoint Debug event taken to the EL from the EL.
	ESR_EC_BKPT_AARCH32        = 0x38,
	ESR_EC_BRK_AARCH64         = 0x3C,
} esr_exception_class_t;

typedef enum {
	FSC_TRANSLATION_FAULT_L0   = 0x04,
	FSC_TRANSLATION_FAULT_L1   = 0x05,
	FSC_TRANSLATION_FAULT_L2   = 0x06,
	FSC_TRANSLATION_FAULT_L3   = 0x07,
	FSC_ACCESS_FLAG_FAULT_L1   = 0x09,
	FSC_ACCESS_FLAG_FAULT_L2   = 0x0A,
	FSC_ACCESS_FLAG_FAULT_L3   = 0x0B,
	FSC_PERMISSION_FAULT_L1    = 0x0D,
	FSC_PERMISSION_FAULT_L2    = 0x0E,
	FSC_PERMISSION_FAULT_L3    = 0x0F,
	FSC_SYNC_EXT_ABORT         = 0x10,
	FSC_ASYNC_EXT_ABORT        = 0x11,
	FSC_SYNC_EXT_ABORT_TT_L1   = 0x15,
	FSC_SYNC_EXT_ABORT_TT_L2   = 0x16,
	FSC_SYNC_EXT_ABORT_TT_L3   = 0x17,
	FSC_SYNC_PARITY            = 0x18,
	FSC_ASYNC_PARITY           = 0x19,
	FSC_SYNC_PARITY_TT_L1      = 0x1D,
	FSC_SYNC_PARITY_TT_L2      = 0x1E,
	FSC_SYNC_PARITY_TT_L3      = 0x1F,
	FSC_ALIGNMENT_FAULT        = 0x21,
	FSC_DEBUG_FAULT            = 0x22,
} fault_status_t;
#endif /* __ASSEMBLER__ */

/*
 * HVC event ISS
 *
 *  24      16 15    0
 * +---------+-------+
 * |000000000|  IMM  |
 * +---------+-------+
 *
 * where:
 *   IMM: Immediate value
 */

#define ISS_HVC_IMM_MASK         0xffff
#define ISS_HVC_IMM(x)           ((x) & ISS_HVC_IMM_MASK)

/*
 * Software step debug event ISS (EL1)
 *
 *  24 23               7  6  5    0
 * +---+-----------------+--+------+
 * |ISV|00000000000000000|EX| IFSC |
 * +---+-----------------+--+------+
 *
 * where:
 *   ISV:  Instruction syndrome valid
 *   EX:   Exclusive access
 *   IFSC: Instruction Fault Status Code
 */

#define ISS_SSDE_ISV_SHIFT       24
#define ISS_SSDE_ISV             (0x1 << ISS_SSDE_ISV_SHIFT)

#define ISS_SSDE_EX_SHIFT        6
#define ISS_SSDE_EX              (0x1 << ISS_SSDE_EX_SHIFT)

#define ISS_SSDE_FSC_MASK        0x3F
#define ISS_SSDE_FSC(x)          (x & ISS_SSDE_FSC_MASK)

/*
 * Instruction Abort ISS (EL1)
 *
 *  24           10  9  8 6  5    0
 * +---------------+--+---+------+
 * |000000000000000|EA|000| IFSC |
 * +---------------+--+---+------+
 *
 * where:
 *   EA:   External Abort type
 *   IFSC: Instruction Fault Status Code
 */

#define ISS_IA_EA_SHIFT          9
#define ISS_IA_EA                (0x1 << ISS_IA_EA_SHIFT)

#define ISS_IA_FSC_MASK          0x3F
#define ISS_IA_FSC(x)            (x & ISS_IA_FSC_MASK)


/*
 * Data Abort ISS (EL1)
 *
 *  24              9  8  7     6   5    0
 * +---------------+--+--+-----+---+------+
 * |000000000000000|EA|CM|S1PTW|WnR| DFSC |
 * +---------------+--+--+-----+---+------+
 *
 * where:
 *   EA:    External Abort type
 *   CM:    Cache Maintenance operation
 *   S1PTW: Stage 2 exception on Stage 1 page table walk
 *   WnR:   Write not Read
 *   DFSC:  Data Fault Status Code
 */
#define ISS_DA_EA_SHIFT          9
#define ISS_DA_EA                (0x1 << ISS_DA_EA_SHIFT)

#define ISS_DA_CM_SHIFT          8
#define ISS_DA_CM                (0x1 << ISS_DA_CM_SHIFT)

#define ISS_DA_S1PTW_SHIFT       7
#define ISS_DA_S1PTW             (0x1 << ISS_DA_S1PTW_SHIFT)

#define ISS_DA_WNR_SHIFT         6
#define ISS_DA_WNR               (0x1 << ISS_DA_WNR_SHIFT)

#define ISS_DA_FSC_MASK          0x3F
#define ISS_DA_FSC(x)            (x & ISS_DA_FSC_MASK)
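
/*
 * Example: a hedged sketch of triaging a data abort from a saved ESR value,
 * combining ESR_EC(), the ISS_DA_* fields, and fault_status_t above.
 * `handle_data_abort' is a hypothetical name, not this file's API.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>
#include <stdint.h>

static void
handle_data_abort(uint64_t esr)
{
	if (ESR_EC(esr) != ESR_EC_DABORT_EL0 && ESR_EC(esr) != ESR_EC_DABORT_EL1) {
		return; /* not a data abort */
	}
	uint32_t iss       = (uint32_t)ESR_ISS(esr);
	bool     is_write  = (iss & ISS_DA_WNR) != 0;
	fault_status_t fsc = (fault_status_t)ISS_DA_FSC(iss);

	switch (fsc) {
	case FSC_TRANSLATION_FAULT_L3:
	case FSC_PERMISSION_FAULT_L3:
		/* e.g. hand off to the VM layer with the read/write disposition */
		break;
	default:
		break;
	}
	(void)is_write;
}
#endif
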
/*
 * Floating Point Exception ISS (EL1)
 *
 *  24 23 22             8   7  6 5  4   3   2   1   0
 * +-+---+---------------+---+--+---+---+---+---+---+
 * |0|TFV|000000000000000|IDF|00|IXF|UFF|OFF|DZF|IOF|
 * +-+---+---------------+---+--+---+---+---+---+---+
 *
 * where:
 *   TFV: Trapped Fault Valid
 *   IDF: Input Denormal Exception
 *   IXF: Inexact Exception
 *   UFF: Underflow Exception
 *   OFF: Overflow Exception
 *   DZF: Divide by Zero Exception
 *   IOF: Invalid Operation Exception
 */
#define ISS_FP_TFV_SHIFT         23
#define ISS_FP_TFV               (0x1 << ISS_FP_TFV_SHIFT)

#define ISS_FP_IDF_SHIFT         7
#define ISS_FP_IDF               (0x1 << ISS_FP_IDF_SHIFT)

#define ISS_FP_IXF_SHIFT         4
#define ISS_FP_IXF               (0x1 << ISS_FP_IXF_SHIFT)

#define ISS_FP_UFF_SHIFT         3
#define ISS_FP_UFF               (0x1 << ISS_FP_UFF_SHIFT)

#define ISS_FP_OFF_SHIFT         2
#define ISS_FP_OFF               (0x1 << ISS_FP_OFF_SHIFT)

#define ISS_FP_DZF_SHIFT         1
#define ISS_FP_DZF               (0x1 << ISS_FP_DZF_SHIFT)

#define ISS_FP_IOF_SHIFT         0
#define ISS_FP_IOF               (0x1 << ISS_FP_IOF_SHIFT)

/*
 * Breakpoint Exception ISS (EL1)
 *
 *  24      16 15      0
 * +---------+---------+
 * |000000000| Comment |
 * +---------+---------+
 *
 * where:
 *   Comment: Instruction Comment Field Value
 */
#define ISS_BRK_COMMENT_MASK     0xFFFF
#define ISS_BRK_COMMENT(x)       (x & ISS_BRK_COMMENT_MASK)
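
/*
 * Example: a hedged sketch of recovering the immediate from a `brk #imm'
 * exception using ESR_EC() and ISS_BRK_COMMENT() above. `brk_comment' is a
 * hypothetical helper name.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>

/* Returns 0 and stores the brk immediate, or -1 for a non-brk exception. */
static int
brk_comment(uint64_t esr, uint16_t *imm_out)
{
	if (ESR_EC(esr) != ESR_EC_BRK_AARCH64) {
		return -1;
	}
	*imm_out = (uint16_t)ISS_BRK_COMMENT(ESR_ISS(esr));
	return 0;
}
#endif
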
#if HAS_UCNORMAL_MEM
#define ISS_UC                   0x11
#endif /* HAS_UCNORMAL_MEM */


/*
 * Physical Address Register (EL1)
 */
#define PAR_F_SHIFT              0
#define PAR_F                    (0x1 << PAR_F_SHIFT)

#define PLATFORM_SYSCALL_TRAP_NO 0x80000000

#define ARM64_SYSCALL_CODE_REG_NUM (16)

#define ARM64_CLINE_SHIFT        6

#if defined(APPLE_ARM64_ARCH_FAMILY)
#define L2CERRSTS_DATSBEESV      (1ULL << 2)            /* L2C data single bit ECC error */
#define L2CERRSTS_DATDBEESV      (1ULL << 4)            /* L2C data double bit ECC error */
#endif

/*
 * Timer definitions.
 */
#define CNTKCTL_EL1_PL0PTEN      (0x1 << 9)             /* 1: EL0 access to physical timer regs permitted */
#define CNTKCTL_EL1_PL0VTEN      (0x1 << 8)             /* 1: EL0 access to virtual timer regs permitted */
#define CNTKCTL_EL1_EVENTI_MASK  (0x000000f0)           /* Mask for bits describing which bit to use for triggering event stream */
#define CNTKCTL_EL1_EVENTI_SHIFT (0x4)                  /* Shift for same */
#define CNTKCTL_EL1_EVENTDIR     (0x1 << 3)             /* 1: one-to-zero transition of specified bit causes event */
#define CNTKCTL_EL1_EVNTEN       (0x1 << 2)             /* 1: enable event stream */
#define CNTKCTL_EL1_PL0VCTEN     (0x1 << 1)             /* 1: EL0 access to virtual timebase + frequency reg enabled */
#define CNTKCTL_EL1_PL0PCTEN     (0x1 << 0)             /* 1: EL0 access to physical timebase + frequency reg enabled */

#define CNTV_CTL_EL0_ISTATUS     (0x1 << 2)             /* (read only): whether interrupt asserted */
#define CNTV_CTL_EL0_IMASKED     (0x1 << 1)             /* 1: interrupt masked */
#define CNTV_CTL_EL0_ENABLE      (0x1 << 0)             /* 1: virtual timer enabled */

#define CNTP_CTL_EL0_ISTATUS     CNTV_CTL_EL0_ISTATUS
#define CNTP_CTL_EL0_IMASKED     CNTV_CTL_EL0_IMASKED
#define CNTP_CTL_EL0_ENABLE      CNTV_CTL_EL0_ENABLE

#define MIDR_EL1_REV_SHIFT       0
#define MIDR_EL1_REV_MASK        (0xf << MIDR_EL1_REV_SHIFT)
#define MIDR_EL1_PNUM_SHIFT      4
#define MIDR_EL1_PNUM_MASK       (0xfff << MIDR_EL1_PNUM_SHIFT)
#define MIDR_EL1_ARCH_SHIFT      16
#define MIDR_EL1_ARCH_MASK       (0xf << MIDR_EL1_ARCH_SHIFT)
#define MIDR_EL1_VAR_SHIFT       20
#define MIDR_EL1_VAR_MASK        (0xf << MIDR_EL1_VAR_SHIFT)
#define MIDR_EL1_IMP_SHIFT       24
#define MIDR_EL1_IMP_MASK        (0xff << MIDR_EL1_IMP_SHIFT)

#define MIDR_FIJI                (0x002 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_CAPRI               (0x003 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_MAUI                (0x004 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_ELBA                (0x005 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_CAYMAN              (0x006 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_MYST                (0x007 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_SKYE_MONSOON        (0x008 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_SKYE_MISTRAL        (0x009 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_CYPRUS_VORTEX       (0x00B << MIDR_EL1_PNUM_SHIFT)
#define MIDR_CYPRUS_TEMPEST      (0x00C << MIDR_EL1_PNUM_SHIFT)
#define MIDR_M9                  (0x00F << MIDR_EL1_PNUM_SHIFT)
#define MIDR_ARUBA_VORTEX        (0x010 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_ARUBA_TEMPEST       (0x011 << MIDR_EL1_PNUM_SHIFT)

#ifdef APPLELIGHTNING
#define MIDR_CEBU_LIGHTNING      (0x012 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_CEBU_THUNDER        (0x013 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_TURKS               (0x026 << MIDR_EL1_PNUM_SHIFT)
#endif

#ifdef APPLEFIRESTORM
#define MIDR_SICILY_ICESTORM     (0x020 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_SICILY_FIRESTORM    (0x021 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_TONGA_ICESTORM      (0x022 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_TONGA_FIRESTORM     (0x023 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_JADE_CHOP_ICESTORM  (0x024 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_JADE_CHOP_FIRESTORM (0x025 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_JADE_DIE_ICESTORM   (0x028 << MIDR_EL1_PNUM_SHIFT)
#define MIDR_JADE_DIE_FIRESTORM  (0x029 << MIDR_EL1_PNUM_SHIFT)
#endif
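
/*
 * Example: a hedged sketch of splitting MIDR_EL1 into its fields with the
 * masks above, plus a part-number compare showing how the pre-shifted MIDR_*
 * SoC values are meant to be used. `midr_fields', `midr_decode', and
 * `is_aruba_vortex' are hypothetical names.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>
#include <stdint.h>

struct midr_fields {
	uint32_t implementer, variant, arch, pnum, revision;
};

static struct midr_fields
midr_decode(uint32_t midr)
{
	struct midr_fields f;
	f.implementer = (midr & MIDR_EL1_IMP_MASK) >> MIDR_EL1_IMP_SHIFT;
	f.variant     = (midr & MIDR_EL1_VAR_MASK) >> MIDR_EL1_VAR_SHIFT;
	f.arch        = (midr & MIDR_EL1_ARCH_MASK) >> MIDR_EL1_ARCH_SHIFT;
	f.pnum        = (midr & MIDR_EL1_PNUM_MASK) >> MIDR_EL1_PNUM_SHIFT;
	f.revision    = (midr & MIDR_EL1_REV_MASK) >> MIDR_EL1_REV_SHIFT;
	return f;
}

/* The SoC/core macros are already shifted, so compare against the masked MIDR. */
static bool
is_aruba_vortex(uint32_t midr)
{
	return (midr & MIDR_EL1_PNUM_MASK) == MIDR_ARUBA_VORTEX;
}
#endif
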
/*
 * Apple-ISA-Extensions ID Register.
 */
#define AIDR_MUL53               (1 << 0)
#define AIDR_WKDM                (1 << 1)
#define AIDR_ARCHRETENTION       (1 << 2)


/*
 * CoreSight debug registers
 */
#define CORESIGHT_ED             0
#define CORESIGHT_CTI            1
#define CORESIGHT_PMU            2
#define CORESIGHT_UTT            3 /* Not truly a coresight thing, but at a fixed convenient location right after the coresight region */

#define CORESIGHT_OFFSET(x)      ((x) * 0x10000)
#define CORESIGHT_REGIONS        4
#define CORESIGHT_SIZE           0x1000


/*
 * ID_AA64ISAR0_EL1 - AArch64 Instruction Set Attribute Register 0
 *
 *  63   60 59  56 55 52 51  48 47 44 43  40 39  36 35  32 31 28 27   24 23    20 19   16 15  12 11   8 7   4 3    0
 * +--------+-------+------+-------+------+-------+-------+------+-------+--------+--------+-------+------+------+-----+------+
 * |  rndr  |  tlb  |  ts  |  fhm  |  dp  |  sm4  |  sm3  | sha3 |  rdm  |  res0  | atomic | crc32 | sha2 | sha1 | aes | res0 |
 * +--------+-------+------+-------+------+-------+-------+------+-------+--------+--------+-------+------+------+-----+------+
 */

#define ID_AA64ISAR0_EL1_TS_OFFSET        52
#define ID_AA64ISAR0_EL1_TS_MASK          (0xfull << ID_AA64ISAR0_EL1_TS_OFFSET)
#define ID_AA64ISAR0_EL1_TS_FLAGM_EN      (1ull << ID_AA64ISAR0_EL1_TS_OFFSET)
#define ID_AA64ISAR0_EL1_TS_FLAGM2_EN     (2ull << ID_AA64ISAR0_EL1_TS_OFFSET)

#define ID_AA64ISAR0_EL1_FHM_OFFSET       48
#define ID_AA64ISAR0_EL1_FHM_MASK         (0xfull << ID_AA64ISAR0_EL1_FHM_OFFSET)
#define ID_AA64ISAR0_EL1_FHM_8_2          (1ull << ID_AA64ISAR0_EL1_FHM_OFFSET)

#define ID_AA64ISAR0_EL1_DP_OFFSET        44
#define ID_AA64ISAR0_EL1_DP_MASK          (0xfull << ID_AA64ISAR0_EL1_DP_OFFSET)
#define ID_AA64ISAR0_EL1_DP_EN            (1ull << ID_AA64ISAR0_EL1_DP_OFFSET)

#define ID_AA64ISAR0_EL1_SHA3_OFFSET      32
#define ID_AA64ISAR0_EL1_SHA3_MASK        (0xfull << ID_AA64ISAR0_EL1_SHA3_OFFSET)
#define ID_AA64ISAR0_EL1_SHA3_EN          (1ull << ID_AA64ISAR0_EL1_SHA3_OFFSET)

#define ID_AA64ISAR0_EL1_RDM_OFFSET       28
#define ID_AA64ISAR0_EL1_RDM_MASK         (0xfull << ID_AA64ISAR0_EL1_RDM_OFFSET)
#define ID_AA64ISAR0_EL1_RDM_EN           (1ull << ID_AA64ISAR0_EL1_RDM_OFFSET)

#define ID_AA64ISAR0_EL1_ATOMIC_OFFSET    20
#define ID_AA64ISAR0_EL1_ATOMIC_MASK      (0xfull << ID_AA64ISAR0_EL1_ATOMIC_OFFSET)
#define ID_AA64ISAR0_EL1_ATOMIC_8_1       (2ull << ID_AA64ISAR0_EL1_ATOMIC_OFFSET)

#define ID_AA64ISAR0_EL1_CRC32_OFFSET     16
#define ID_AA64ISAR0_EL1_CRC32_MASK       (0xfull << ID_AA64ISAR0_EL1_CRC32_OFFSET)
#define ID_AA64ISAR0_EL1_CRC32_EN         (1ull << ID_AA64ISAR0_EL1_CRC32_OFFSET)

#define ID_AA64ISAR0_EL1_SHA2_OFFSET      12
#define ID_AA64ISAR0_EL1_SHA2_MASK        (0xfull << ID_AA64ISAR0_EL1_SHA2_OFFSET)
#define ID_AA64ISAR0_EL1_SHA2_EN          (1ull << ID_AA64ISAR0_EL1_SHA2_OFFSET)
#define ID_AA64ISAR0_EL1_SHA2_512_EN      (2ull << ID_AA64ISAR0_EL1_SHA2_OFFSET)

#define ID_AA64ISAR0_EL1_SHA1_OFFSET      8
#define ID_AA64ISAR0_EL1_SHA1_MASK        (0xfull << ID_AA64ISAR0_EL1_SHA1_OFFSET)
#define ID_AA64ISAR0_EL1_SHA1_EN          (1ull << ID_AA64ISAR0_EL1_SHA1_OFFSET)

#define ID_AA64ISAR0_EL1_AES_OFFSET       4
#define ID_AA64ISAR0_EL1_AES_MASK         (0xfull << ID_AA64ISAR0_EL1_AES_OFFSET)
#define ID_AA64ISAR0_EL1_AES_EN           (1ull << ID_AA64ISAR0_EL1_AES_OFFSET)
#define ID_AA64ISAR0_EL1_AES_PMULL_EN     (2ull << ID_AA64ISAR0_EL1_AES_OFFSET)
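
/*
 * Example: a hedged sketch of probing for the ARMv8.1 LSE atomics with the
 * ID_AA64ISAR0_EL1 fields above. The MRS() helper is defined near the end of
 * this header; `cpu_has_lse' is a hypothetical name.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>
#include <stdint.h>

static bool
cpu_has_lse(void)
{
	uint64_t isar0;
	MRS(isar0, "ID_AA64ISAR0_EL1");
	/* A value of 2 or greater in the atomic field indicates FEAT_LSE. */
	return (isar0 & ID_AA64ISAR0_EL1_ATOMIC_MASK) >= ID_AA64ISAR0_EL1_ATOMIC_8_1;
}
#endif
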
/*
 * ID_AA64ISAR1_EL1 - AArch64 Instruction Set Attribute Register 1
 *
 *  63  56 55  52 51 48 47  44 43     40 39 36 35     32 31 28 27 24 23   20 19  16 15   12 11 8 7   4 3   0
 * +------+------+-----+------+---------+------+---------+-----+-----+-------+------+-------+-----+-----+-----+
 * | res0 | i8mm | dgh | bf16 | specres |  sb  | frintts | gpi | gpa | lrcpc | fcma | jscvt | api | apa | dpb |
 * +------+------+-----+------+---------+------+---------+-----+-----+-------+------+-------+-----+-----+-----+
 */

#define ID_AA64ISAR1_EL1_I8MM_OFFSET      52
#define ID_AA64ISAR1_EL1_I8MM_MASK        (0xfull << ID_AA64ISAR1_EL1_I8MM_OFFSET)
#define ID_AA64ISAR1_EL1_I8MM_EN          (1ull << ID_AA64ISAR1_EL1_I8MM_OFFSET)

#define ID_AA64ISAR1_EL1_DGH_OFFSET       48
#define ID_AA64ISAR1_EL1_DGH_MASK         (0xfull << ID_AA64ISAR1_EL1_DGH_OFFSET)

#define ID_AA64ISAR1_EL1_BF16_OFFSET      44
#define ID_AA64ISAR1_EL1_BF16_MASK        (0xfull << ID_AA64ISAR1_EL1_BF16_OFFSET)
#define ID_AA64ISAR1_EL1_BF16_EN          (1ull << ID_AA64ISAR1_EL1_BF16_OFFSET)

#define ID_AA64ISAR1_EL1_SPECRES_OFFSET   40
#define ID_AA64ISAR1_EL1_SPECRES_MASK     (0xfull << ID_AA64ISAR1_EL1_SPECRES_OFFSET)
#define ID_AA64ISAR1_EL1_SPECRES_EN       (1ull << ID_AA64ISAR1_EL1_SPECRES_OFFSET)

#define ID_AA64ISAR1_EL1_SB_OFFSET        36
#define ID_AA64ISAR1_EL1_SB_MASK          (0xfull << ID_AA64ISAR1_EL1_SB_OFFSET)
#define ID_AA64ISAR1_EL1_SB_EN            (1ull << ID_AA64ISAR1_EL1_SB_OFFSET)

#define ID_AA64ISAR1_EL1_FRINTTS_OFFSET   32
#define ID_AA64ISAR1_EL1_FRINTTS_MASK     (0xfull << ID_AA64ISAR1_EL1_FRINTTS_OFFSET)
#define ID_AA64ISAR1_EL1_FRINTTS_EN       (1ull << ID_AA64ISAR1_EL1_FRINTTS_OFFSET)

#define ID_AA64ISAR1_EL1_GPI_OFFSET       28
#define ID_AA64ISAR1_EL1_GPI_MASK         (0xfull << ID_AA64ISAR1_EL1_GPI_OFFSET)
#define ID_AA64ISAR1_EL1_GPI_EN           (1ull << ID_AA64ISAR1_EL1_GPI_OFFSET)

#define ID_AA64ISAR1_EL1_GPA_OFFSET       24
#define ID_AA64ISAR1_EL1_GPA_MASK         (0xfull << ID_AA64ISAR1_EL1_GPA_OFFSET)

#define ID_AA64ISAR1_EL1_LRCPC_OFFSET     20
#define ID_AA64ISAR1_EL1_LRCPC_MASK       (0xfull << ID_AA64ISAR1_EL1_LRCPC_OFFSET)
#define ID_AA64ISAR1_EL1_LRCPC_EN         (1ull << ID_AA64ISAR1_EL1_LRCPC_OFFSET)
#define ID_AA64ISAR1_EL1_LRCP2C_EN        (2ull << ID_AA64ISAR1_EL1_LRCPC_OFFSET)

#define ID_AA64ISAR1_EL1_FCMA_OFFSET      16
#define ID_AA64ISAR1_EL1_FCMA_MASK        (0xfull << ID_AA64ISAR1_EL1_FCMA_OFFSET)
#define ID_AA64ISAR1_EL1_FCMA_EN          (1ull << ID_AA64ISAR1_EL1_FCMA_OFFSET)

#define ID_AA64ISAR1_EL1_JSCVT_OFFSET     12
#define ID_AA64ISAR1_EL1_JSCVT_MASK       (0xfull << ID_AA64ISAR1_EL1_JSCVT_OFFSET)
#define ID_AA64ISAR1_EL1_JSCVT_EN         (1ull << ID_AA64ISAR1_EL1_JSCVT_OFFSET)

#define ID_AA64ISAR1_EL1_API_OFFSET       8
#define ID_AA64ISAR1_EL1_API_MASK         (0xfull << ID_AA64ISAR1_EL1_API_OFFSET)
#define ID_AA64ISAR1_EL1_API_PAuth_EN     (1ull << ID_AA64ISAR1_EL1_API_OFFSET)
#define ID_AA64ISAR1_EL1_API_PAuth2_EN    (3ull << ID_AA64ISAR1_EL1_API_OFFSET)
#define ID_AA64ISAR1_EL1_API_FPAC_EN      (4ull << ID_AA64ISAR1_EL1_API_OFFSET)

#define ID_AA64ISAR1_EL1_APA_OFFSET       4
#define ID_AA64ISAR1_EL1_APA_MASK         (0xfull << ID_AA64ISAR1_EL1_APA_OFFSET)

#define ID_AA64ISAR1_EL1_DPB_OFFSET       0
#define ID_AA64ISAR1_EL1_DPB_MASK         (0xfull << ID_AA64ISAR1_EL1_DPB_OFFSET)
#define ID_AA64ISAR1_EL1_DPB_EN           (1ull << ID_AA64ISAR1_EL1_DPB_OFFSET)
#define ID_AA64ISAR1_EL1_DPB2_EN          (2ull << ID_AA64ISAR1_EL1_DPB_OFFSET)
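
/*
 * Example: a hedged sketch of checking whether any form of address
 * authentication (FEAT_PAuth) is reported, via the APA (architected) and API
 * (implementation-defined) fields above. `cpu_has_pauth' is a hypothetical
 * name.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>
#include <stdint.h>

static bool
cpu_has_pauth(void)
{
	uint64_t isar1;
	MRS(isar1, "ID_AA64ISAR1_EL1");
	/* A nonzero APA or API field means PAuth instructions are implemented. */
	return (isar1 & (ID_AA64ISAR1_EL1_APA_MASK | ID_AA64ISAR1_EL1_API_MASK)) != 0;
}
#endif
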
/*
 * ID_AA64ISAR2_EL1 - AArch64 Instruction Set Attribute Register 2
 *
 *  63        8 7     4 3    0
 * +----------+-------+------+
 * |   res0   | RPRES | WFxT |
 * +----------+-------+------+
 */

#define ID_AA64ISAR2_EL1_RPRES_OFFSET     4
#define ID_AA64ISAR2_EL1_RPRES_MASK       (0xfull << ID_AA64ISAR2_EL1_RPRES_OFFSET)
#define ID_AA64ISAR2_EL1_RPRES_EN         (1ull << ID_AA64ISAR2_EL1_RPRES_OFFSET)

#define ID_AA64ISAR2_EL1_WFxT_OFFSET      0
#define ID_AA64ISAR2_EL1_WFxT_MASK        (0xfull << ID_AA64ISAR2_EL1_WFxT_OFFSET)
#define ID_AA64ISAR2_EL1_WFxT_EN          (1ull << ID_AA64ISAR2_EL1_WFxT_OFFSET)

/*
 * ID_AA64MMFR0_EL1 - AArch64 Memory Model Feature Register 0
 *
 *  63 60 59 56 55    48 47 44 43     40 39      36 35      32 31   28 27    24 23    20 19       16 15   12 11    8 7       4 3       0
 * +-----+-----+--------+-----+----------+-----------+-----------+--------+---------+---------+-----------+--------+--------+----------+---------+
 * | ECV | FGT |  RES0  | ExS | TGran4_2 | TGran64_2 | TGran16_2 | TGran4 | TGran64 | TGran16 | BigEndEL0 | SNSMem | BigEnd | ASIDBits | PARange |
 * +-----+-----+--------+-----+----------+-----------+-----------+--------+---------+---------+-----------+--------+--------+----------+---------+
 */

#define ID_AA64MMFR0_EL1_ECV_OFFSET       60
#define ID_AA64MMFR0_EL1_ECV_MASK         (0xfull << ID_AA64MMFR0_EL1_ECV_OFFSET)
#define ID_AA64MMFR0_EL1_ECV_EN           (1ull << ID_AA64MMFR0_EL1_ECV_OFFSET)

/*
 * ID_AA64MMFR2_EL1 - AArch64 Memory Model Feature Register 2
 *
 *  63 60 59 56 55 52 51 48 47   44 43 40 39 36 35 32 31 28 27 24 23  20 19      16 15  12 11  8 7   4 3   0
 * +------+-----+-----+-----+------+-----+-----+------+------+------+-------+---------+------+-----+-----+-----+
 * | E0PD | EVT | BBM | TTL | RES0 | FWB | IDS |  AT  |  ST  |  NV  | CCIDX | VARANGE | IESB | LSM | UAO | CnP |
 * +------+-----+-----+-----+------+-----+-----+------+------+------+-------+---------+------+-----+-----+-----+
 */

#define ID_AA64MMFR2_EL1_AT_OFFSET        32
#define ID_AA64MMFR2_EL1_AT_MASK          (0xfull << ID_AA64MMFR2_EL1_AT_OFFSET)
#define ID_AA64MMFR2_EL1_AT_LSE2_EN       (1ull << ID_AA64MMFR2_EL1_AT_OFFSET)

/*
 * ID_AA64PFR0_EL1 - AArch64 Processor Feature Register 0
 *
 *  63  60 59  56 55  52 51 48 47 44 43  40 39  36 35 32 31 28 27 24 23     20 19 16 15 12 11  8 7   4 3   0
 * +------+------+------+-----+-----+------+------+-----+-----+-----+---------+------+-----+-----+-----+-----+
 * | CSV3 | CSV2 | RES0 | DIT | AMU | MPAM | SEL2 | SVE | RAS | GIC | AdvSIMD |  FP  | EL3 | EL2 | EL1 | EL0 |
 * +------+------+------+-----+-----+------+------+-----+-----+-----+---------+------+-----+-----+-----+-----+
 */

#define ID_AA64PFR0_EL1_CSV3_OFFSET       60
#define ID_AA64PFR0_EL1_CSV3_MASK         (0xfull << ID_AA64PFR0_EL1_CSV3_OFFSET)
#define ID_AA64PFR0_EL1_CSV3_EN           (1ull << ID_AA64PFR0_EL1_CSV3_OFFSET)

#define ID_AA64PFR0_EL1_CSV2_OFFSET       56
#define ID_AA64PFR0_EL1_CSV2_MASK         (0xfull << ID_AA64PFR0_EL1_CSV2_OFFSET)
#define ID_AA64PFR0_EL1_CSV2_EN           (1ull << ID_AA64PFR0_EL1_CSV2_OFFSET)

#define ID_AA64PFR0_EL1_DIT_OFFSET        48
#define ID_AA64PFR0_EL1_DIT_MASK          (0xfull << ID_AA64PFR0_EL1_DIT_OFFSET)
#define ID_AA64PFR0_EL1_DIT_EN            (1ull << ID_AA64PFR0_EL1_DIT_OFFSET)

#define ID_AA64PFR0_EL1_AdvSIMD_OFFSET    20
#define ID_AA64PFR0_EL1_AdvSIMD_MASK      (0xfull << ID_AA64PFR0_EL1_AdvSIMD_OFFSET)
#define ID_AA64PFR0_EL1_AdvSIMD_HPFPCVT   (0x0ull << ID_AA64PFR0_EL1_AdvSIMD_OFFSET)
#define ID_AA64PFR0_EL1_AdvSIMD_FP16      (0x1ull << ID_AA64PFR0_EL1_AdvSIMD_OFFSET)
#define ID_AA64PFR0_EL1_AdvSIMD_DIS       (0xfull << ID_AA64PFR0_EL1_AdvSIMD_OFFSET)
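
/*
 * Example: a hedged sketch of reading the AdvSIMD field of ID_AA64PFR0_EL1.
 * Note that 0xf (ID_AA64PFR0_EL1_AdvSIMD_DIS) marks the feature as not
 * implemented, so the field is compared for equality rather than magnitude.
 * `cpu_has_fp16' is a hypothetical name.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>
#include <stdint.h>

static bool
cpu_has_fp16(void)
{
	uint64_t pfr0;
	MRS(pfr0, "ID_AA64PFR0_EL1");
	uint64_t simd = pfr0 & ID_AA64PFR0_EL1_AdvSIMD_MASK;
	return simd == ID_AA64PFR0_EL1_AdvSIMD_FP16; /* not _HPFPCVT and not _DIS */
}
#endif
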
/*
 * ID_AA64PFR1_EL1 - AArch64 Processor Feature Register 1
 *
 *  63                             20 19      16 15     12 11  8 7    4 3    0
 * +----------------------------------+-----------+----------+-------+------+------+
 * |               RES0               | MPAM_frac | RAS_frac |  MTE  | SSBS |  BT  |
 * +----------------------------------+-----------+----------+-------+------+------+
 */

#define ID_AA64PFR1_EL1_SSBS_OFFSET       4
#define ID_AA64PFR1_EL1_SSBS_MASK         (0xfull << ID_AA64PFR1_EL1_SSBS_OFFSET)
#define ID_AA64PFR1_EL1_SSBS_EN           (1ull << ID_AA64PFR1_EL1_SSBS_OFFSET)

#define ID_AA64PFR1_EL1_BT_OFFSET         0
#define ID_AA64PFR1_EL1_BT_MASK           (0xfull << ID_AA64PFR1_EL1_BT_OFFSET)
#define ID_AA64PFR1_EL1_BT_EN             (1ull << ID_AA64PFR1_EL1_BT_OFFSET)

/*
 * ID_AA64MMFR1_EL1 - AArch64 Memory Model Feature Register 1
 *
 *  63  52 51    48 47 44 43 40 39 36 35  32 31 28 27     24 23 20 19 16 15 12 11 8 7       4 3      0
 * +------+--------+-----+-----+-----+------+-----+---------+-----+----+------+----+----------+--------+
 * | res0 | nTLBPA | AFP | HCX | ETS | TWED | XNX | SpecSEI | PAN | LO | HPDS | VH | VMIDBits | HAFDBS |
 * +------+--------+-----+-----+-----+------+-----+---------+-----+----+------+----+----------+--------+
 */

#define ID_AA64MMFR1_EL1_AFP_OFFSET       44
#define ID_AA64MMFR1_EL1_AFP_MASK         (0xfull << ID_AA64MMFR1_EL1_AFP_OFFSET)
#define ID_AA64MMFR1_EL1_AFP_EN           (1ull << ID_AA64MMFR1_EL1_AFP_OFFSET)


#define APSTATE_G_SHIFT          (0)
#define APSTATE_P_SHIFT          (1)
#define APSTATE_A_SHIFT          (2)
#define APSTATE_AP_MASK          ((1ULL << APSTATE_A_SHIFT) | (1ULL << APSTATE_P_SHIFT))


#define ACTLR_EL1_EnTSO          (1ULL << 1)
#define ACTLR_EL1_EnAPFLG        (1ULL << 4)
#define ACTLR_EL1_EnAFP          (1ULL << 5)
#define ACTLR_EL1_EnPRSV         (1ULL << 6)

#if HAS_USAT_BIT
#define ACTLR_EL1_USAT_OFFSET    0
#define ACTLR_EL1_USAT_MASK      (1ULL << ACTLR_EL1_USAT_OFFSET)
#define ACTLR_EL1_USAT           ACTLR_EL1_USAT_MASK
#endif

#define ACTLR_EL1_DisHWP_OFFSET  3
#define ACTLR_EL1_DisHWP_MASK    (1ULL << ACTLR_EL1_DisHWP_OFFSET)
#define ACTLR_EL1_DisHWP         ACTLR_EL1_DisHWP_MASK


#if defined(HAS_APPLE_PAC)
// The value of ptrauth_string_discriminator("recover"), hardcoded so it can be used from assembly code
#define PAC_DISCRIMINATOR_RECOVER 0x1e02
#endif


#define CTR_EL0_L1Ip_OFFSET      14
#define CTR_EL0_L1Ip_VIPT        (2ULL << CTR_EL0_L1Ip_OFFSET)
#define CTR_EL0_L1Ip_PIPT        (3ULL << CTR_EL0_L1Ip_OFFSET)
#define CTR_EL0_L1Ip_MASK        (3ULL << CTR_EL0_L1Ip_OFFSET)
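
/*
 * Example: a hedged sketch of classifying the L1 instruction-cache indexing
 * policy from CTR_EL0 with the L1Ip fields above. `l1_icache_is_pipt' is a
 * hypothetical name.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdbool.h>
#include <stdint.h>

static bool
l1_icache_is_pipt(void)
{
	uint64_t ctr;
	MRS(ctr, "CTR_EL0");
	return (ctr & CTR_EL0_L1Ip_MASK) == CTR_EL0_L1Ip_PIPT;
}
#endif
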
#ifdef __ASSEMBLER__

/*
 * Conditionally write to a system/special-purpose register.
 * The register is written to only when the first two arguments
 * do not match. If they do match, the macro jumps to a
 * caller-provided label.
 * The _ISB variant also conditionally issues an ISB after the MSR.
 *
 * $0 - System/special-purpose register to modify
 * $1 - Register containing the current value of the target register
 * $2 - Register containing expected value
 * $3 - Label to jump to when register is already set to expected value
 */
.macro CMSR
	cmp     $1, $2

	/* Skip expensive MSR if not required */
	b.eq    $3f
	msr     $0, $2
.endmacro

.macro CMSR_ISB
	CMSR    $0, $1, $2, $3
	isb     sy
.endmacro

/*
 * Modify FPCR only if it does not contain the XNU default value.
 * $0 - Register containing current FPCR value
 * $1 - Scratch register
 * $2 - Label to jump to when FPCR is already set to default value
 */
.macro SANITIZE_FPCR
	mov     $1, #FPCR_DEFAULT
	CMSR    FPCR, $0, $1, $2
.endmacro

/*
 * Family of macros that can be used to protect code sections such that they
 * are only executed on a particular SoC/Revision/CPU, and skipped otherwise.
 * All macros will forward-jump to 1f when the condition is not matched.
 * This label may be defined manually, or implicitly through the use of
 * the EXEC_END macro.
 * For cores, XX can be: EQ (equal), ALL (don't care).
 * For revisions, XX can be: EQ (equal), LO (lower than), HS (higher or same), ALL (don't care).
 */

/*
 * $0 - MIDR_SOC[_CORE], e.g. MIDR_ARUBA_VORTEX
 * $1 - CPU_VERSION_XX, e.g. CPU_VERSION_B1
 * $2 - GPR containing MIDR_EL1 value
 * $3 - Scratch register
 */
.macro EXEC_COREEQ_REVEQ
	and     $3, $2, #MIDR_EL1_PNUM_MASK
	cmp     $3, $0
	b.ne    1f

	mov     $3, $2
	bfi     $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $3, $1
	b.ne    1f
.endmacro

.macro EXEC_COREEQ_REVLO
	and     $3, $2, #MIDR_EL1_PNUM_MASK
	cmp     $3, $0
	b.ne    1f

	mov     $3, $2
	bfi     $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $3, $1
	b.pl    1f
.endmacro

.macro EXEC_COREEQ_REVHS
	and     $3, $2, #MIDR_EL1_PNUM_MASK
	cmp     $3, $0
	b.ne    1f

	mov     $3, $2
	bfi     $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $3, $3, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $3, $1
	b.mi    1f
.endmacro

/*
 * $0 - CPU_VERSION_XX, e.g. CPU_VERSION_B1
 * $1 - GPR containing MIDR_EL1 value
 * $2 - Scratch register
 */
.macro EXEC_COREALL_REVEQ
	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.ne    1f
.endmacro

.macro EXEC_COREALL_REVLO
	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.pl    1f
.endmacro

.macro EXEC_COREALL_REVHS
	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.mi    1f
.endmacro

.macro CMP_FOREACH reg, cc, label, car, cdr:vararg
	cmp     \reg, \car
	b.\cc   \label
.ifnb \cdr
	CMP_FOREACH \reg, \cc, \label, \cdr
.endif
.endm

.macro EXEC_COREIN_REVALL midr_el1, scratch, midr_list:vararg
	and     \scratch, \midr_el1, #MIDR_EL1_PNUM_MASK
	CMP_FOREACH \scratch, eq, Lmatch\@, \midr_list
	b       1f
Lmatch\@:
.endm

/*
 * $0 - MIDR_SOC[_CORE], e.g. MIDR_ARUBA_VORTEX
 * $1 - GPR containing MIDR_EL1 value
 * $2 - Scratch register
 */
.macro EXEC_COREEQ_REVALL
	and     $2, $1, #MIDR_EL1_PNUM_MASK
	cmp     $2, $0
	b.ne    1f
.endmacro

/*
 * $0 - CPU_VERSION_XX, e.g. CPU_VERSION_B1
 * $1 - GPR containing MIDR_EL1 value
 * $2 - Scratch register
 */
.macro EXEC_PCORE_REVEQ
	mrs     $2, MPIDR_EL1
	and     $2, $2, #(MPIDR_PNE)
	cmp     $2, xzr
	b.eq    1f

	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.ne    1f
.endmacro

.macro EXEC_PCORE_REVLO
	mrs     $2, MPIDR_EL1
	and     $2, $2, #(MPIDR_PNE)
	cmp     $2, xzr
	b.eq    1f

	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.pl    1f
.endmacro

.macro EXEC_PCORE_REVHS
	mrs     $2, MPIDR_EL1
	and     $2, $2, #(MPIDR_PNE)
	cmp     $2, xzr
	b.eq    1f

	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.mi    1f
.endmacro

.macro EXEC_ECORE_REVEQ
	mrs     $2, MPIDR_EL1
	and     $2, $2, #(MPIDR_PNE)
	cmp     $2, xzr
	b.ne    1f

	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.ne    1f
.endmacro

.macro EXEC_ECORE_REVLO
	mrs     $2, MPIDR_EL1
	and     $2, $2, #(MPIDR_PNE)
	cmp     $2, xzr
	b.ne    1f

	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.pl    1f
.endmacro

.macro EXEC_ECORE_REVHS
	mrs     $2, MPIDR_EL1
	and     $2, $2, #(MPIDR_PNE)
	cmp     $2, xzr
	b.ne    1f

	mov     $2, $1
	bfi     $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #4
	ubfx    $2, $2, #(MIDR_EL1_VAR_SHIFT - 4), #8
	cmp     $2, $0
	b.mi    1f
.endmacro

/*
 * $0 - GPR containing MIDR_EL1 value
 * $1 - Scratch register
 */
.macro EXEC_PCORE_REVALL
	mrs     $1, MPIDR_EL1
	and     $1, $1, #(MPIDR_PNE)
	cmp     $1, xzr
	b.eq    1f
.endmacro

.macro EXEC_ECORE_REVALL
	mrs     $1, MPIDR_EL1
	and     $1, $1, #(MPIDR_PNE)
	cmp     $1, xzr
	b.ne    1f
.endmacro

/*
 * Macro that defines the label that all EXEC_COREXX_REVXX macros jump to.
 */
.macro EXEC_END
1:
.endmacro

/*
 * Wedges CPUs with a specified core that are below a specified revision. This
 * macro is intended for CPUs that have been deprecated in iBoot and may have
 * incorrect behavior if they continue running xnu.
 */
.macro DEPRECATE_COREEQ_REVLO core, rev, midr_el1, scratch
	EXEC_COREEQ_REVLO \core, \rev, \midr_el1, \scratch
	/* BEGIN IGNORE CODESTYLE */
	b       .
	/* END IGNORE CODESTYLE */
	EXEC_END
.endmacro

/*
 * Sets bits in an SPR register.
 * arg0: Name of the register to be accessed.
 * arg1: Mask of bits to be set.
 * arg2: Scratch register
 */
.macro HID_SET_BITS
	mrs     $2, $0
	orr     $2, $2, $1
	msr     $0, $2
.endmacro

/*
 * Clears bits in an SPR register.
 * arg0: Name of the register to be accessed.
 * arg1: Mask of bits to be cleared.
 * arg2: Scratch register
 */
.macro HID_CLEAR_BITS
	mrs     $2, $0
	bic     $2, $2, $1
	msr     $0, $2
.endmacro

/*
 * Clears a mask of bits in an SPR register and inserts a value in its place.
 * arg0: Name of the register to be accessed.
 * arg1: Mask of bits to be cleared.
 * arg2: Value to insert
 * arg3: Scratch register
 */
.macro HID_INSERT_BITS
	mrs     $3, $0
	bic     $3, $3, $1
	orr     $3, $3, $2
	msr     $0, $3
.endmacro

/*
 * Replaces the value of a field in an implementation-defined system register.
 * sreg: system register name
 * field: field name within the sysreg, where the assembler symbols
 *        ARM64_REG_<field>_{shift,width} specify the bounds of the field
 *        (note that preprocessor macros will not work here)
 * value: the value to insert
 * scr{1,2}: scratch regs
 */
.macro HID_WRITE_FIELD sreg, field, val, scr1, scr2
	mrs     \scr1, \sreg
	mov     \scr2, \val
	bfi     \scr1, \scr2, ARM64_REG_\sreg\()_\field\()_shift, ARM64_REG_\sreg\()_\field\()_width
	msr     \sreg, \scr1
.endmacro

/*
 * Macro intended to be used as a replacement for ERET.
 * It prevents speculation past ERET instructions by padding
 * up to the decoder width.
 */
.macro ERET_CONTEXT_SYNCHRONIZING
	eret
#if __ARM_SB_AVAILABLE__
	sb      // Technically unnecessary on Apple micro-architectures, may restrict mis-speculation on other architectures
#else /* __ARM_SB_AVAILABLE__ */
	isb     // ISB technically unnecessary on Apple micro-architectures, may restrict mis-speculation on other architectures
	nop     // Sequence of six NOPs to pad out and terminate instruction decode group
	nop
	nop
	nop
	nop
	nop
#endif /* !__ARM_SB_AVAILABLE__ */
.endmacro

#endif /* __ASSEMBLER__ */

#define MSR(reg, src)  __asm__ volatile ("msr " reg ", %0" :: "r" (src))
#define MRS(dest, reg) __asm__ volatile ("mrs %0, " reg : "=r" (dest))
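
/*
 * Example: a hedged sketch of the MSR()/MRS() wrappers above, doing a
 * read-modify-write of the CNTKCTL_EL1 bits defined earlier in this header.
 * `enable_el0_vct' is a hypothetical name.
 */
#if 0 /* illustrative sketch, not compiled */
#include <stdint.h>

static void
enable_el0_vct(void)
{
	uint64_t cntkctl;
	MRS(cntkctl, "CNTKCTL_EL1");       /* read the current value */
	cntkctl |= CNTKCTL_EL1_PL0VCTEN;   /* allow EL0 virtual counter access */
	MSR("CNTKCTL_EL1", cntkctl);       /* write it back */
}
#endif
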
#if XNU_MONITOR
#define __ARM_PTE_PHYSMAP__      1
#define PPL_STATE_KERNEL         0
#define PPL_STATE_DISPATCH       1
#define PPL_STATE_PANIC          2
#define PPL_STATE_EXCEPTION      3
#endif


#endif /* _ARM64_PROC_REG_H_ */