/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <arm64/pac_asm.h>
#include <pexpert/arm64/board_config.h>
#include "assym.s"


#if XNU_MONITOR
/*
 * Exit path defines; for controlling PPL -> kernel transitions.
 * These should fit within a 32-bit integer, as the PPL trampoline packs them
 * into a 32-bit field.
 */
#define PPL_EXIT_DISPATCH	0 /* This is a clean exit after a PPL request. */
#define PPL_EXIT_PANIC_CALL	1 /* The PPL has called panic. */
#define PPL_EXIT_BAD_CALL	2 /* The PPL request failed. */
#define PPL_EXIT_EXCEPTION	3 /* The PPL took an exception. */

/*
 * Aliases for the exception-state system registers used on either side of a
 * PPL <-> kernel transition.  KERNEL_MODE_* name the GL11-banked copies and
 * GUARDED_MODE_* the ordinary EL1 copies.
 *
 * NOTE(review): presumably, while executing in guarded (PPL) mode the
 * kernel's exception state is parked in the GL11 bank while the EL1
 * registers hold the PPL's own state -- confirm against the PPL trampoline
 * code that consumes these aliases.
 */
#define KERNEL_MODE_ELR		ELR_GL11
#define KERNEL_MODE_FAR		FAR_GL11
#define KERNEL_MODE_ESR		ESR_GL11
#define KERNEL_MODE_SPSR	SPSR_GL11
#define KERNEL_MODE_VBAR	VBAR_GL11
#define KERNEL_MODE_TPIDR	TPIDR_GL11

#define GUARDED_MODE_ELR	ELR_EL1
#define GUARDED_MODE_FAR	FAR_EL1
#define GUARDED_MODE_ESR	ESR_EL1
#define GUARDED_MODE_SPSR	SPSR_EL1
#define GUARDED_MODE_VBAR	VBAR_EL1
#define GUARDED_MODE_TPIDR	TPIDR_EL1

/*
 * LOAD_PMAP_CPU_DATA
 *
 * Loads the PPL per-CPU data array entry for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 *
 * The logical CPU number is derived from MPIDR_EL1: the AFF1 field (cluster
 * ID) indexes the 8-byte-entry cluster_offsets table to get a per-cluster
 * base, and the AFF0 field (core-within-cluster) is added to it.
 */
.macro LOAD_PMAP_CPU_DATA
	/* Get the CPU ID. */
	mrs		$0, MPIDR_EL1
	ubfx	$1, $0, MPIDR_AFF1_SHIFT, MPIDR_AFF1_WIDTH
	adrp	$2, EXT(cluster_offsets)@page
	add		$2, $2, EXT(cluster_offsets)@pageoff
	ldr		$1, [$2, $1, lsl #3]		// $1 = cluster_offsets[AFF1] (scaled by 8-byte entries)

	and		$0, $0, MPIDR_AFF0_MASK
	add		$0, $0, $1					// $0 = logical CPU number

	/* Get the PPL CPU data array. */
	adrp	$1, EXT(pmap_cpu_data_array)@page
	add		$1, $1, EXT(pmap_cpu_data_array)@pageoff

	/*
	 * Sanity check the CPU ID (this is not a panic because this pertains to
	 * the hardware configuration; this should only fail if our
	 * understanding of the hardware is incorrect).
	 */
	cmp		$0, MAX_CPUS
	b.hs	.							// CPU ID out of range: spin here forever

	mov		$2, PMAP_CPU_DATA_ARRAY_ENTRY_SIZE
	/* Get the PPL per-CPU data. */
	madd	$0, $0, $2, $1				// $0 = &pmap_cpu_data_array[cpu_id]
.endmacro

/*
 * GET_PMAP_CPU_DATA
 *
 * Retrieves the PPL per-CPU data for the current CPU.
 * arg0 - Address of the PPL per-CPU data is returned through this
 * arg1 - Scratch register
 * arg2 - Scratch register
 *
 * Thin wrapper over LOAD_PMAP_CPU_DATA; kept as a separate name so callers
 * express intent ("get") rather than mechanism ("load").
 */
.macro GET_PMAP_CPU_DATA
	LOAD_PMAP_CPU_DATA $0, $1, $2
.endmacro

#endif /* XNU_MONITOR */

/*
 * INIT_SAVED_STATE_FLAVORS
 *
 * Initializes the saved state flavors of a new saved state structure
 * arg0 - saved state pointer
 * arg1 - 32-bit scratch reg
 * arg2 - 32-bit scratch reg
 *
 * Writes the flavor/count headers for both the GPR state and the NEON state
 * so generic code can identify the layout as the 64-bit variants.
 */
.macro INIT_SAVED_STATE_FLAVORS
	mov		$1, ARM_SAVED_STATE64			// Set saved state to 64-bit flavor
	mov		$2, ARM_SAVED_STATE64_COUNT
	stp		$1, $2, [$0, SS_FLAVOR]			// flavor and count are adjacent 32-bit fields
	mov		$1, ARM_NEON_SAVED_STATE64		// Set neon state to 64-bit flavor
	str		$1, [$0, NS_FLAVOR]
	mov		$1, ARM_NEON_SAVED_STATE64_COUNT
	str		$1, [$0, NS_COUNT]
.endmacro

/*
 * SPILL_REGISTERS
 *
 * Spills the current set of registers (excluding x0, x1, sp) to the specified
 * save area.
 *
 * On CPUs with PAC, the kernel "A" keys are used to create a thread signature.
 * These keys are deliberately kept loaded into the CPU for later kernel use.
 *
 * arg0 - KERNEL_MODE or HIBERNATE_MODE
 * arg1 - ADD_THREAD_SIGNATURE or POISON_THREAD_SIGNATURE
 * x0 - Address of the save area
 * x25 - Return the value of FPCR
 *
 * Clobbers x19 - x25 (their pre-existing values are saved to the save area
 * first, then reused as scratch for ELR/SPSR/FAR/ESR/FPSR/FPCR and, on PAC
 * builds, for preserving x1/lr/SPSel across the signing call).
 */
#define KERNEL_MODE				0
#define HIBERNATE_MODE			1

#define ADD_THREAD_SIGNATURE	0
#define POISON_THREAD_SIGNATURE	1

.macro SPILL_REGISTERS	mode, signing_mode = ADD_THREAD_SIGNATURE
	stp		x2, x3, [x0, SS64_X2]			// Save remaining GPRs
	stp		x4, x5, [x0, SS64_X4]
	stp		x6, x7, [x0, SS64_X6]
	stp		x8, x9, [x0, SS64_X8]
	stp		x10, x11, [x0, SS64_X10]
	stp		x12, x13, [x0, SS64_X12]
	stp		x14, x15, [x0, SS64_X14]
	stp		x16, x17, [x0, SS64_X16]
	stp		x18, x19, [x0, SS64_X18]
	stp		x20, x21, [x0, SS64_X20]
	stp		x22, x23, [x0, SS64_X22]
	stp		x24, x25, [x0, SS64_X24]
	stp		x26, x27, [x0, SS64_X26]
	stp		x28, fp, [x0, SS64_X28]
	str		lr, [x0, SS64_LR]

	/* Save arm_neon_saved_state64 */
	stp		q0, q1, [x0, NS64_Q0]
	stp		q2, q3, [x0, NS64_Q2]
	stp		q4, q5, [x0, NS64_Q4]
	stp		q6, q7, [x0, NS64_Q6]
	stp		q8, q9, [x0, NS64_Q8]
	stp		q10, q11, [x0, NS64_Q10]
	stp		q12, q13, [x0, NS64_Q12]
	stp		q14, q15, [x0, NS64_Q14]
	stp		q16, q17, [x0, NS64_Q16]
	stp		q18, q19, [x0, NS64_Q18]
	stp		q20, q21, [x0, NS64_Q20]
	stp		q22, q23, [x0, NS64_Q22]
	stp		q24, q25, [x0, NS64_Q24]
	stp		q26, q27, [x0, NS64_Q26]
	stp		q28, q29, [x0, NS64_Q28]
	stp		q30, q31, [x0, NS64_Q30]
	mrs		x24, FPSR
	str		w24, [x0, NS64_FPSR]			// FPSR/FPCR are 32-bit fields; store the w views
	mrs		x25, FPCR
	str		w25, [x0, NS64_FPCR]
Lsave_neon_state_done_\@:

	mrs		x22, ELR_EL1					// Get exception link register
	mrs		x23, SPSR_EL1					// Load CPSR into var reg x23

#if defined(HAS_APPLE_PAC)
	.if \mode != HIBERNATE_MODE

	.if \signing_mode == POISON_THREAD_SIGNATURE
	/* Store an invalid (all-ones) hash so a later verify cannot succeed. */
	mov		x21, #-1
	str		x21, [x0, SS64_JOPHASH]
	.else
	/* Save x1 and LR to preserve across call */
	mov		x21, x1
	mov		x20, lr

	/*
	 * Create thread state signature
	 *
	 * Arg0: The ARM context pointer
	 * Arg1: The PC value to sign
	 * Arg2: The CPSR value to sign
	 * Arg3: The LR value to sign
	 * Arg4: The X16 value to sign
	 * Arg5: The X17 value to sign
	 */
	mov		x1, x22
	mov		w2, w23
	mov		x3, x20
	mov		x4, x16
	mov		x5, x17

	/* Run the signing call on SP1; restore the caller's SP selection after. */
	mrs		x19, SPSel
	msr		SPSel, #1
	bl		_ml_sign_thread_state
	msr		SPSel, x19
	mov		lr, x20
	mov		x1, x21
	.endif

	.endif
#endif /* defined(HAS_APPLE_PAC) */

	str		x22, [x0, SS64_PC]				// Save ELR to PCB
	str		w23, [x0, SS64_CPSR]			// Save CPSR to PCB

	mrs		x20, FAR_EL1
	mrs		x21, ESR_EL1

	str		x20, [x0, SS64_FAR]
	str		w21, [x0, SS64_ESR]
.endmacro

/*
 * DEADLOOP
 *
 * Branch-to-self: parks the current CPU in an infinite loop.
 */
.macro DEADLOOP
	b .
.endmacro

/**
 * Reloads SP with the current thread's interrupt stack.
 *
 * SP0 is expected to already be selected. Clobbers x1 and tmp.
 */
.macro SWITCH_TO_INT_STACK	tmp
	mrs		x1, TPIDR_EL1					// x1 = current thread (TPIDR_EL1 holds the thread pointer here)
	LOAD_INT_STACK	dst=x1, src=x1, tmp=\tmp
	mov		sp, x1							// Set the stack pointer to the interrupt stack
.endmacro
