/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm64/pac_asm.h>
#include <pexpert/arm64/board_config.h>
#include "assym.s"

#if XNU_MONITOR
/*
 * Exit path defines; for controlling PPL -> kernel transitions.
 * These should fit within a 32-bit integer, as the PPL trampoline packs them
 * into a 32-bit field.
 */
#define PPL_EXIT_DISPATCH   0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL 1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL   2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION  3 /* The PPL took an exception. */


/*
 * Aliases for the exception-state registers as seen from each side of the
 * PPL boundary: "kernel mode" uses the GL11 views, "guarded mode" the
 * standard EL1 views.
 */
#define KERNEL_MODE_ELR     ELR_GL11
#define KERNEL_MODE_FAR     FAR_GL11
#define KERNEL_MODE_ESR     ESR_GL11
#define KERNEL_MODE_SPSR    SPSR_GL11
#define KERNEL_MODE_VBAR    VBAR_GL11
#define KERNEL_MODE_TPIDR   TPIDR_GL11

#define GUARDED_MODE_ELR    ELR_EL1
#define GUARDED_MODE_FAR    FAR_EL1
#define GUARDED_MODE_ESR    ESR_EL1
#define GUARDED_MODE_SPSR   SPSR_EL1
#define GUARDED_MODE_VBAR   VBAR_EL1
#define GUARDED_MODE_TPIDR  TPIDR_EL1

/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 *
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID. */
	mrs		$0, MPIDR_EL1
	ubfx	$1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH
	adrp	$2, EXT(cluster_offsets)@page
	add		$2, $2, EXT(cluster_offsets)@pageoff
	/* Index the per-cluster offset table by Aff1 (cluster ID). */
	ldr		$1, [$2, $1, lsl #3]

	/* CPU ID = Aff0 (core within cluster) + cluster's base offset. */
	and		$0, $0, MPIDR_AFF0_MASK
	add		$0, $0, $1

	/* Get the PPL CPU data array. */
	adrp	$1, EXT(pmap_cpu_data_array)@page
	add		$1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this is not a panic because this pertains to
	 * the hardware configuration; this should only fail if our
	 * understanding of the hardware is incorrect).
	 */
	cmp		$0, MAX_CPUS
	b.hs	.

	mov		$2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd	$0, $0, $2, $1
.endmacro

/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 *
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro

#endif /* XNU_MONITOR */

/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure
 * arg0 - saved state pointer
 * arg1 - 32-bit scratch reg
 * arg2 - 32-bit scratch reg
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov		$1, ARM_SAVED_STATE64				// Set saved state to 64-bit flavor
	mov		$2, ARM_SAVED_STATE64_COUNT
	stp		$1, $2, [$0, SS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64			// Set neon state to 64-bit flavor
	str		$1, [$0, NS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64_COUNT
	str		$1, [$0, NS_COUNT]
.endmacro

/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 * arg0 - KERNEL_MODE or HIBERNATE_MODE
 * x0 - Address of the save area
 */
#define KERNEL_MODE    0
#define HIBERNATE_MODE 1

.macro SPILL_REGISTERS mode
	stp		x2, x3, [x0, SS64_X2]				// Save remaining GPRs
	stp		x4, x5, [x0, SS64_X4]
	stp		x6, x7, [x0, SS64_X6]
	stp		x8, x9, [x0, SS64_X8]
	stp		x10, x11, [x0, SS64_X10]
	stp		x12, x13, [x0, SS64_X12]
	stp		x14, x15, [x0, SS64_X14]
	stp		x16, x17, [x0, SS64_X16]
	stp		x18, x19, [x0, SS64_X18]
	stp		x20, x21, [x0, SS64_X20]
	stp		x22, x23, [x0, SS64_X22]
	stp		x24, x25, [x0, SS64_X24]
	stp		x26, x27, [x0, SS64_X26]
	stp		x28, fp, [x0, SS64_X28]
	str		lr, [x0, SS64_LR]

	/* Save arm_neon_saved_state64 */

	stp		q0, q1, [x0, NS64_Q0]
	stp		q2, q3, [x0, NS64_Q2]
	stp		q4, q5, [x0, NS64_Q4]
	stp		q6, q7, [x0, NS64_Q6]
	stp		q8, q9, [x0, NS64_Q8]
	stp		q10, q11, [x0, NS64_Q10]
	stp		q12, q13, [x0, NS64_Q12]
	stp		q14, q15, [x0, NS64_Q14]
	stp		q16, q17, [x0, NS64_Q16]
	stp		q18, q19, [x0, NS64_Q18]
	stp		q20, q21, [x0, NS64_Q20]
	stp		q22, q23, [x0, NS64_Q22]
	stp		q24, q25, [x0, NS64_Q24]
	stp		q26, q27, [x0, NS64_Q26]
	stp		q28, q29, [x0, NS64_Q28]
	stp		q30, q31, [x0, NS64_Q30]

	mrs		x22, ELR_EL1						// Get exception link register
	mrs		x23, SPSR_EL1						// Load CPSR into var reg x23
	mrs		x24, FPSR
	mrs		x25, FPCR

#if defined(HAS_APPLE_PAC)
	.if \mode != HIBERNATE_MODE

	/* Save x1 and LR to preserve across call */
	mov		x21, x1
	mov		x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov		x1, x22
	mov		w2, w23
	mov		x3, x20
	mov		x4, x16
	mov		x5, x17

	/* Make the call on SP1; restore the caller's SP selection afterward. */
	mrs		x19, SPSel
	msr		SPSel, #1
	bl		_ml_sign_thread_state
	msr		SPSel, x19
	mov		lr, x20
	mov		x1, x21
	.endif
#endif /* defined(HAS_APPLE_PAC) */

	str		x22, [x0, SS64_PC]					// Save ELR to PCB
	str		w23, [x0, SS64_CPSR]				// Save CPSR to PCB
	str		w24, [x0, NS64_FPSR]
	str		w25, [x0, NS64_FPCR]

	mrs		x20, FAR_EL1
	mrs		x21, ESR_EL1

	str		x20, [x0, SS64_FAR]
	str		w21, [x0, SS64_ESR]
.endmacro

/* Spin forever (used as an unrecoverable-state trap). */
.macro DEADLOOP
	b		.
.endmacro

// SP0 is expected to already be selected
.macro SWITCH_TO_KERN_STACK
	ldr		x1, [x1, TH_KSTACKPTR]				// Load the top of the kernel stack to x1
	mov		sp, x1								// Set the stack pointer to the kernel stack
.endmacro

// SP0 is expected to already be selected
.macro SWITCH_TO_INT_STACK
	mrs		x1, TPIDR_EL1						// Current thread from the thread-pointer register
	ldr		x1, [x1, ACT_CPUDATAP]				// thread -> per-CPU data
	ldr		x1, [x1, CPU_ISTACKPTR]				// per-CPU data -> interrupt stack top
	mov		sp, x1								// Set the stack pointer to the interrupt stack
.endmacro
