xref: /xnu-12377.81.4/osfmk/arm64/pac_asm.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _ARM64_PAC_ASM_H_
30 #define _ARM64_PAC_ASM_H_
31 
32 #ifndef __ASSEMBLER__
33 #error "This header should only be used in .s files"
34 #endif
35 
36 #include <pexpert/arm64/board_config.h>
37 #include <arm64/proc_reg.h>
38 #if HAS_PARAVIRTUALIZED_PAC
39 #include <arm64/hv_hvc.h>
40 #include "smccc_asm.h"
41 #endif
42 #include "assym.s"
43 
44 #if defined(HAS_APPLE_PAC)
45 
46 
47 /* BEGIN IGNORE CODESTYLE */
48 
49 /**
50  * REPROGRAM_JOP_KEYS
51  *
52  * Loads a userspace process's JOP key (task->jop_pid) into the CPU, and
53  * updates current_cpu_datap()->jop_key accordingly.  This reprogramming process
54  * is skipped whenever the "new" JOP key has already been loaded into the CPU.
55  *
56  *   skip_label - branch to this label if new_jop_key is already loaded into CPU
57  *   new_jop_key - process's jop_pid
58  *   cpudatap - current cpu_data_t *
59  *   tmp - scratch register
60  */
.macro REPROGRAM_JOP_KEYS	skip_label, new_jop_key, cpudatap, tmp
	ldr		\tmp, [\cpudatap, CPU_JOP_KEY]		// tmp = JOP key currently recorded for this CPU
	cmp		\new_jop_key, \tmp
	b.eq	\skip_label							// key already loaded: skip the (expensive) reprogram
	SET_JOP_KEY_REGISTERS	\new_jop_key, \tmp	// load the new key into the CPU
	str		\new_jop_key, [\cpudatap, CPU_JOP_KEY]	// record the newly-loaded key in cpu_data
.endmacro
68 
69 /**
70  * REPROGRAM_ROP_KEYS
71  *
72  * Loads a userspace process's ROP key (task->rop_pid) into the CPU, and
73  * updates current_cpu_datap()->rop_key accordingly.  This reprogramming process
74  * is skipped whenever the "new" ROP key has already been loaded into the CPU.
75  *
76  *   skip_label - branch to this label if new_rop_key is already loaded into CPU
77  *   new_rop_key - process's rop_pid
78  *   cpudatap - current cpu_data_t *
79  *   tmp - scratch register
80  */
.macro REPROGRAM_ROP_KEYS	skip_label, new_rop_key, cpudatap, tmp
	ldr		\tmp, [\cpudatap, CPU_ROP_KEY]		// tmp = ROP key currently recorded for this CPU
	cmp		\new_rop_key, \tmp
	b.eq	\skip_label							// key already loaded: skip the (expensive) reprogram
	SET_ROP_KEY_REGISTERS	\new_rop_key, \tmp	// load the new key into the CPU
	str		\new_rop_key, [\cpudatap, CPU_ROP_KEY]	// record the newly-loaded key in cpu_data
.endmacro
88 
89 /**
90  * SET_JOP_KEY_REGISTERS
91  *
92  * Unconditionally loads a userspace process's JOP key (task->jop_pid) into the
93  * CPU.  The caller is responsible for updating current_cpu_datap()->jop_key as
94  * needed.
95  *
96  *   new_jop_key - process's jop_pid
97  *   tmp - scratch register
98  */
.macro SET_JOP_KEY_REGISTERS	new_jop_key, tmp
#if HAS_PARAVIRTUALIZED_PAC
	/* Note: \tmp is not referenced by the paravirtualized implementation. */
	SAVE_SMCCC_CLOBBERED_REGISTERS
	/*
	 * We're deliberately calling PAC_SET_EL0_DIVERSIFIER here, even though the
	 * EL0 diversifier affects both A (JOP) and B (ROP) keys.  We don't want
	 * SET_JOP_KEY_REGISTERS to have an impact on the EL1 A key state, since
	 * these are the keys the kernel uses to sign pointers on the heap.
	 *
	 * Using new_jop_key as the EL0 diversifier has the same net effect of giving
	 * userspace its own set of JOP keys, but doesn't affect EL1 A key state.
	 */
	MOV64	x0, VMAPPLE_PAC_SET_EL0_DIVERSIFIER	// x0 = hypercall ID
	mov		x1, \new_jop_key					// x1 = hypercall argument (new diversifier)
	hvc		#0
	cbnz		x0, .							// nonzero return status: park here forever
	LOAD_SMCCC_CLOBBERED_REGISTERS
#endif /* HAS_PARAVIRTUALIZED_PAC */
.endmacro
118 
119 /**
120  * SET_ROP_KEY_REGISTERS
121  *
122  * Unconditionally loads a userspace process's ROP key (task->rop_pid) into the
123  * CPU.  The caller is responsible for updating current_cpu_datap()->rop_key as
124  * needed.
125  *
126  *   new_rop_key - process's rop_pid
127  *   tmp - scratch register
128  */
.macro SET_ROP_KEY_REGISTERS	new_rop_key, tmp
#if HAS_PARAVIRTUALIZED_PAC
	/* Note: \tmp is not referenced by the paravirtualized implementation. */
	SAVE_SMCCC_CLOBBERED_REGISTERS
	MOV64	x0, VMAPPLE_PAC_SET_B_KEYS			// x0 = hypercall ID
	mov		x1, \new_rop_key					// x1 = hypercall argument (new B-key value)
	hvc		#0
	cbnz		x0, .							// nonzero return status: park here forever
	LOAD_SMCCC_CLOBBERED_REGISTERS
#endif /* HAS_PARAVIRTUALIZED_PAC */
.endmacro
139 
140 /**
141  * PAC_INIT_KEY_STATE
142  *
143  * Sets the initial PAC key state, but does not enable the keys.
144  *
145  *   tmp - scratch register
146  *   tmp2 - scratch register
147  */
.macro PAC_INIT_KEY_STATE	tmp, tmp2
#if HAS_PARAVIRTUALIZED_PAC
#if HIBERNATION
	#error PAC_INIT_KEY_STATE is not implemented for HAS_PARAVIRTUALIZED_PAC && HIBERNATION
#endif
	/*
	 * This call clobbers x0-x3.  However we only initialize PAC at a point in
	 * common_start where x0-x3 are safe to clobber, and where we don't yet have
	 * a working stack to stash the existing values anyway.
	 *
	 * Note: \tmp and \tmp2 are not referenced by the paravirtualized
	 * implementation.
	 */
	mov		x0, #VMAPPLE_PAC_SET_INITIAL_STATE	// x0 = hypercall ID (no arguments)
	hvc		#0
	cbnz		x0, .							// nonzero return status: park here forever
#endif /* HAS_PARAVIRTUALIZED_PAC */
.endmacro
163 
164 /*
165  * For pacga diversification we always put the tag type in the
166  * lowest 4 bits of the first source to pacga.
167  *
168  * We diversify by use case, to prevent attackers from using
169  * pacga results from one usecase to attack another usecase.
170  *
171  * First pacga when using a context:
172  * pacga chain_reg, (context << 4) + PACGA_TAG_xxx, first_data
173  *
174  * First pacga without context:
175  * pacga chain_reg, PACGA_TAG_xxx, first_data
176  *
177  * Subsequent pacga's:
178  * pacga chain_reg, chain_reg + PACGA_TAG_xxx, next_data
179  *
180  * chain_reg layout
181  * 63 .. 32    || 31 .. 4                | 3 .. 0
182  * chain value || available per use case | TAG
183  */
184 #define PACGA_TAG_0         0b0000
185 #define PACGA_TAG_BLOB      0b0001
186 #define PACGA_TAG_THREAD    0b0010
187 #define PACGA_TAG_IRG       0b0011
188 #define PACGA_TAG_HV        0b0100
189 #define PACGA_TAG_ADDRPERM  0b0101
190 #define PACGA_TAG_6         0b0110
191 #define PACGA_TAG_7         0b0111
192 #define PACGA_TAG_8         0b1000
193 #define PACGA_TAG_9         0b1001
194 /*
 * This one is never actually used; it is here to effectively make the THREAD TAG
 * 3 bits, so we can sign enough of PC there for the foreseeable future (up to 128M of kernel .text)
197  */
198 #define PACGA_TAG_THREAD_2  0b1010
199 #define PACGA_TAG_b         0b1011
200 #define PACGA_TAG_c         0b1100
201 #define PACGA_TAG_d         0b1101
202 #define PACGA_TAG_e         0b1110
203 #define PACGA_TAG_f         0b1111
204 
205 
206 
207 #if NEEDS_MTE_IRG_RESEED
208 /*
209  * This generates a new 48 bit seed, based on the reseed_counter.
 * We use PACGA_TAG_IRG here, and we generate two 32-bit values
 * by using pacga twice with:
212  * cpu_number || 0b0000 || TAG_IRG(0b0011), counter
213  * cpu_number || 0b0001 || TAG_IRG(0b0011), counter
214  *
215  * Needs three registers to use, all will be clobbered
216  * Expects interrupts to be disabled.
217  */
.macro PACGA_IRG_RESEED x, y, z
	mrs     \x, TPIDR_EL1                   // x = current thread
	ldr     \x, [\x, ACT_CPUDATAP]          // x = current cpu_data_t *

	// No need for atomic - These are per CPU and interrupts are disabled
	ldr     \z, [\x, CPU_IRG_RESEED_COUNTER]
	add     \y, \z, #1
	str     \y, [\x, CPU_IRG_RESEED_COUNTER] // counter++; z keeps the pre-increment value

	ldrsh   \y, [\x, CPU_NUMBER_GS]         // y = cpu number (sign-extended 16-bit load)
	lsl     \y, \y, 8
	add     \y, \y, #0x3                    // y = cpu_number || 0b0000 || PACGA_TAG_IRG (0b0011)
	                                        // Will be resolved after rdar://123719761

	pacga   \x, \y, \z                      // PACGA_TAG_IRG: first hash of the counter
	add     \y, \y, 0x10                    // bits [7:4]: 0b0000 -> 0b0001 (second diversifier)
	pacga   \y, \y, \z                      // PACGA_TAG_IRG: second hash of the counter
	eor     \y, \x, \y, LSR #32             // merge both pacga results into one 64-bit seed

#if APPLEVIRTUALPLATFORM
	mrs		\x, GCR_EL1
	// NOTE(review): when GCR_EL1.RRND is clear the orr below is skipped —
	// presumably the hypervisor provides ARM-standard RGSR seeding in that
	// mode; confirm against the AVP PAC/MTE contract.
	tbz		\x, GCR_EL1_RRND_OFFSET, Lpacga_irg_reseed_arm_mode_\@
#endif
	orr		\y, \y, (0b111 << RGSR_EL1_SEED_OFFSET)
#if APPLEVIRTUALPLATFORM
Lpacga_irg_reseed_arm_mode_\@:
#endif
	msr     RGSR_EL1, \y                    // publish the new IRG seed
.endmacro
246 #endif /* NEEDS_MTE_IRG_RESEED */
247 
248 /* END IGNORE CODESTYLE */
249 
250 #endif /* defined(HAS_APPLE_PAC) */
251 
252 #endif /* _ARM64_PAC_ASM_H_ */
253 
254 /* vim: set ts=4 ft=asm: */
255