xref: /xnu-11417.121.6/osfmk/mach/arm/thread_status.h (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * FILE_ID: thread_status.h
30  */
31 
32 
33 #ifndef _ARM_THREAD_STATUS_H_
34 #define _ARM_THREAD_STATUS_H_
35 
36 #if defined (__arm__) || defined (__arm64__)
37 
38 #include <mach/machine/_structs.h>
39 #include <mach/machine/thread_state.h>
40 #include <mach/message.h>
41 #include <mach/vm_types.h>
42 
43 #ifdef XNU_KERNEL_PRIVATE
44 #include <os/refcnt.h>
45 #endif
46 
47 /*
48  *    Support for determining the state of a thread
49  */
50 
51 
/*
 *  Flavors
 *
 *  Numeric identifiers passed to thread_get_state()/thread_set_state() to
 *  select which register file is transferred.  Values must stay unique and
 *  stable: they are ABI shared with userland.
 */

#define ARM_THREAD_STATE         1
#define ARM_UNIFIED_THREAD_STATE ARM_THREAD_STATE
#define ARM_VFP_STATE            2
#define ARM_EXCEPTION_STATE      3
#define ARM_DEBUG_STATE          4 /* pre-armv8 */
#define THREAD_STATE_NONE        5
#define ARM_THREAD_STATE64       6
#define ARM_EXCEPTION_STATE64    7
//      ARM_THREAD_STATE_LAST    8 /* legacy */
#define ARM_THREAD_STATE32       9
#define ARM_EXCEPTION_STATE64_V2 10

#ifdef XNU_KERNEL_PRIVATE
#define X86_THREAD_STATE_NONE    13 /* i386/thread_status.h THREAD_STATE_NONE */
#endif /* XNU_KERNEL_PRIVATE */

/* API */
#define ARM_DEBUG_STATE32        14
#define ARM_DEBUG_STATE64        15
#define ARM_NEON_STATE           16
#define ARM_NEON_STATE64         17
#define ARM_CPMU_STATE64         18

#ifdef XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SAVED_STATE32        20
#define ARM_SAVED_STATE64        21
#define ARM_NEON_SAVED_STATE32   22
#define ARM_NEON_SAVED_STATE64   23
#endif /* XNU_KERNEL_PRIVATE */

#define ARM_PAGEIN_STATE         27

/* API */
#define ARM_SME_STATE            28
#define ARM_SVE_Z_STATE1         29
#define ARM_SVE_Z_STATE2         30
#define ARM_SVE_P_STATE          31
#define ARM_SME_ZA_STATE1        32
#define ARM_SME_ZA_STATE2        33
#define ARM_SME_ZA_STATE3        34
#define ARM_SME_ZA_STATE4        35
#define ARM_SME_ZA_STATE5        36
#define ARM_SME_ZA_STATE6        37
#define ARM_SME_ZA_STATE7        38
#define ARM_SME_ZA_STATE8        39
#define ARM_SME_ZA_STATE9        40
#define ARM_SME_ZA_STATE10       41
#define ARM_SME_ZA_STATE11       42
#define ARM_SME_ZA_STATE12       43 /* fixed: was 42, colliding with ARM_SME_ZA_STATE11 */
#define ARM_SME_ZA_STATE13       44
#define ARM_SME_ZA_STATE14       45
#define ARM_SME_ZA_STATE15       46
#define ARM_SME_ZA_STATE16       47
#define ARM_SME2_STATE           48
#if XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SME_SAVED_STATE      49
#endif /* XNU_KERNEL_PRIVATE */

#define THREAD_STATE_FLAVORS     50     /* This must be updated to 1 more than the highest numerical state flavor */
117 
/* Default: no platform-specific extra flavors are valid */
#ifndef ARM_STATE_FLAVOR_IS_OTHER_VALID
#define ARM_STATE_FLAVOR_IS_OTHER_VALID(_flavor_) 0
#endif

/*
 * True when setting flavor `x` rewrites the core CPU registers (pc/sp/gprs).
 * Arguments are parenthesized so compound expressions expand correctly.
 */
#define FLAVOR_MODIFIES_CORE_CPU_REGISTERS(x) \
(((x) == ARM_THREAD_STATE) ||   \
 ((x) == ARM_THREAD_STATE32) || \
 ((x) == ARM_THREAD_STATE64))

/* True when `x` is a flavor accepted by the thread get/set state paths. */
#define VALID_THREAD_STATE_FLAVOR(x) \
	(((x) == ARM_THREAD_STATE) ||         \
	 ((x) == ARM_VFP_STATE) ||            \
	 ((x) == ARM_EXCEPTION_STATE) ||      \
	 ((x) == ARM_DEBUG_STATE) ||          \
	 ((x) == THREAD_STATE_NONE) ||        \
	 ((x) == ARM_THREAD_STATE32) ||       \
	 ((x) == ARM_THREAD_STATE64) ||       \
	 ((x) == ARM_EXCEPTION_STATE64) ||    \
	 ((x) == ARM_EXCEPTION_STATE64_V2) || \
	 ((x) == ARM_NEON_STATE) ||           \
	 ((x) == ARM_NEON_STATE64) ||         \
	 ((x) == ARM_DEBUG_STATE32) ||        \
	 ((x) == ARM_DEBUG_STATE64) ||        \
	 ((x) == ARM_PAGEIN_STATE) ||         \
	 (ARM_STATE_FLAVOR_IS_OTHER_VALID(x)))
/*
 * VALID_THREAD_STATE_FLAVOR() intentionally excludes ARM_SME_STATE through
 * ARM_SME2_STATE, since these are not currently supported inside Mach exception
 * ports.
 */
148 
/*
 * Header prepended to the unified/saved state structures below; identifies
 * which state layout follows.
 */
struct arm_state_hdr {
	uint32_t flavor; /* state flavor constant, e.g. ARM_THREAD_STATE64 */
	uint32_t count;  /* state size — presumably in uint32_t words per the *_COUNT macros; confirm at call sites */
};
typedef struct arm_state_hdr arm_state_hdr_t;

/* User thread state; the unsuffixed type aliases the 32-bit layout */
typedef _STRUCT_ARM_THREAD_STATE   arm_thread_state_t;
typedef _STRUCT_ARM_THREAD_STATE   arm_thread_state32_t;
typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t;
158 
#if !defined(KERNEL)
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__)

/* Accessor macros for arm_thread_state64_t pointer fields.
 * Userspace-only: they forward to the __darwin_* implementations, which
 * handle pointer authentication on arm64e. */

/* Return pc field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_pc(ts) \
	        __darwin_arm_thread_state64_get_pc(ts)
/* Return pc field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_pc() macro in that case. */
#define arm_thread_state64_get_pc_fptr(ts) \
	        __darwin_arm_thread_state64_get_pc_fptr(ts)
/* Set pc field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_pc_fptr(ts, fptr) \
	        __darwin_arm_thread_state64_set_pc_fptr(ts, fptr)
/* Set pc field of arm_thread_state64_t to an already signed function pointer */
#define arm_thread_state64_set_pc_presigned_fptr(ts, fptr) \
	        __darwin_arm_thread_state64_set_pc_presigned_fptr(ts, fptr)
/* Return lr field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_lr(ts) \
	        __darwin_arm_thread_state64_get_lr(ts)
/* Return lr field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_lr() macro in that case. */
#define arm_thread_state64_get_lr_fptr(ts) \
	        __darwin_arm_thread_state64_get_lr_fptr(ts)
/* Set lr field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_lr_fptr(ts, fptr) \
	        __darwin_arm_thread_state64_set_lr_fptr(ts, fptr)
/* Set lr field of arm_thread_state64_t to an already signed function pointer */
#define arm_thread_state64_set_lr_presigned_fptr(ts, fptr) \
	        __darwin_arm_thread_state64_set_lr_presigned_fptr(ts, fptr)
/* Return sp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_sp(ts) \
	        __darwin_arm_thread_state64_get_sp(ts)
/* Set sp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_sp(ts, ptr) \
	        __darwin_arm_thread_state64_set_sp(ts, ptr)
/* Return fp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_fp(ts) \
	        __darwin_arm_thread_state64_get_fp(ts)
/* Set fp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_fp(ts, ptr) \
	        __darwin_arm_thread_state64_set_fp(ts, ptr)
/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */
#define arm_thread_state64_ptrauth_strip(ts) \
	        __darwin_arm_thread_state64_ptrauth_strip(ts)

#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */
#endif /* !defined(KERNEL) */
210 
/*
 * Thread state capable of holding either a 32-bit or a 64-bit register file;
 * ash.flavor discriminates the union (see is_thread_state32/64() below).
 */
struct arm_unified_thread_state {
	arm_state_hdr_t ash;
	union {
		arm_thread_state32_t ts_32;
		arm_thread_state64_t ts_64;
	} uts;
};
/* Shorthand for the union members */
#define ts_32 uts.ts_32
#define ts_64 uts.ts_64
typedef struct arm_unified_thread_state arm_unified_thread_state_t;

/* State sizes in 32-bit words, as exchanged through the Mach state APIs */
#define ARM_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state32_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state64_t)/sizeof(uint32_t)))
#define ARM_UNIFIED_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_unified_thread_state_t)/sizeof(uint32_t)))
230 
231 
/* Floating-point / SIMD register state */
typedef _STRUCT_ARM_VFP_STATE         arm_vfp_state_t;
typedef _STRUCT_ARM_NEON_STATE        arm_neon_state_t;
typedef _STRUCT_ARM_NEON_STATE        arm_neon_state32_t;
typedef _STRUCT_ARM_NEON_STATE64      arm_neon_state64_t;


/* Exception state (fault address / exception syndrome) */
typedef _STRUCT_ARM_EXCEPTION_STATE   arm_exception_state_t;
typedef _STRUCT_ARM_EXCEPTION_STATE   arm_exception_state32_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64_V2 arm_exception_state64_v2_t;

/* Hardware debug (breakpoint/watchpoint) state */
typedef _STRUCT_ARM_DEBUG_STATE32     arm_debug_state32_t;
typedef _STRUCT_ARM_DEBUG_STATE64     arm_debug_state64_t;

typedef _STRUCT_ARM_PAGEIN_STATE      arm_pagein_state_t;

/* SME / SVE scalable-register state */
typedef _STRUCT_ARM_SME_STATE         arm_sme_state_t;
typedef _STRUCT_ARM_SVE_Z_STATE       arm_sve_z_state_t;
typedef _STRUCT_ARM_SVE_P_STATE       arm_sve_p_state_t;
typedef _STRUCT_ARM_SME_ZA_STATE      arm_sme_za_state_t;
typedef _STRUCT_ARM_SME2_STATE        arm_sme2_state_t;
253 
#if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
/* See below for ARM64 kernel structure definition for arm_debug_state. */
#else /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
/*
 * Otherwise not ARM64 kernel and we must preserve legacy ARM definitions of
 * arm_debug_state for binary compatibility of userland consumers of this file.
 */
#if defined(__arm__)
typedef _STRUCT_ARM_DEBUG_STATE        arm_debug_state_t;
#elif defined(__arm64__)
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t;
#else /* defined(__arm__) */
#error Undefined architecture
#endif /* defined(__arm__) */
#endif /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
269 
/* Sizes of the exported state structures, in 32-bit words (Mach convention) */

#define ARM_VFP_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfp_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_V2_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_v2_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state32_t)/sizeof(uint32_t)))

#define ARM_PAGEIN_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_pagein_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state64_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state64_t)/sizeof(uint32_t)))

#define ARM_SME_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sme_state_t)/sizeof(uint32_t)))

#define ARM_SVE_Z_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sve_z_state_t)/sizeof(uint32_t)))

#define ARM_SVE_P_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sve_p_state_t)/sizeof(uint32_t)))

#define ARM_SME_ZA_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sme_za_state_t)/sizeof(uint32_t)))

#define ARM_SME2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sme2_state_t)/sizeof(uint32_t)))

/* Default flavor/count pair for this architecture */
#define MACHINE_THREAD_STATE       ARM_THREAD_STATE
#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT


/*
 * Largest state on this machine:
 */
#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX
323 
#ifdef XNU_KERNEL_PRIVATE

/*
 * HAS_*_SAVED_STATE_* gate the saved-state mutators defined later in this
 * file; they are enabled only for configurations that legitimately need to
 * rewrite saved thread state (DTrace, kernel debugging, POST tests, telemetry).
 */
#if CONFIG_DTRACE
#define HAS_ADD_SAVED_STATE_PC          1
#define HAS_SET_SAVED_STATE_PC          1
#define HAS_SET_SAVED_STATE_LR          1
#define HAS_SET_SAVED_STATE_REG         1
#define HAS_MASK_SAVED_STATE_CPSR       1
#endif /* CONFIG_DTRACE */

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
#define HAS_SET_SAVED_STATE_CPSR        1
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

#if CONFIG_XNUPOST
#define HAS_ADD_SAVED_STATE_PC          1
#define HAS_SET_SAVED_STATE_PC          1
#define HAS_SET_SAVED_STATE_CPSR        1
#endif /* CONFIG_XNUPOST */

#if DEBUG || DEVELOPMENT
#define HAS_ADD_SAVED_STATE_PC          1
#endif

#if CONFIG_BTI_TELEMETRY
/* BTI Telemetry needs CPSR to recover from BTI exceptions */
#define HAS_SET_SAVED_STATE_CPSR        1
#endif /* CONFIG_BTI_TELEMETRY */
352 
353 
354 static inline boolean_t
is_thread_state32(const arm_unified_thread_state_t * its)355 is_thread_state32(const arm_unified_thread_state_t *its)
356 {
357 	return its->ash.flavor == ARM_THREAD_STATE32;
358 }
359 
360 static inline boolean_t
is_thread_state64(const arm_unified_thread_state_t * its)361 is_thread_state64(const arm_unified_thread_state_t *its)
362 {
363 	return its->ash.flavor == ARM_THREAD_STATE64;
364 }
365 
366 static inline arm_thread_state32_t*
thread_state32(arm_unified_thread_state_t * its)367 thread_state32(arm_unified_thread_state_t *its)
368 {
369 	return &its->ts_32;
370 }
371 
372 static inline arm_thread_state64_t*
thread_state64(arm_unified_thread_state_t * its)373 thread_state64(arm_unified_thread_state_t *its)
374 {
375 	return &its->ts_64;
376 }
377 
378 static inline const arm_thread_state32_t*
const_thread_state32(const arm_unified_thread_state_t * its)379 const_thread_state32(const arm_unified_thread_state_t *its)
380 {
381 	return &its->ts_32;
382 }
383 
384 static inline const arm_thread_state64_t*
const_thread_state64(const arm_unified_thread_state_t * its)385 const_thread_state64(const arm_unified_thread_state_t *its)
386 {
387 	return &its->ts_64;
388 }
389 
#if defined(__arm64__)

#include <kern/assert.h>
#include <arm64/proc_reg.h>
/* Narrowing cast that asserts the value round-trips through the target type
 * (i.e. no bits are lost); the assert compiles away in release builds. */
#define CAST_ASSERT_SAFE(type, val) (assert((val) == ((type)(val))), (type)(val))
395 
/*
 * GPR context
 */

/* Kernel-internal saved register state for a 32-bit (AArch32) thread. */
struct arm_saved_state32 {
	uint32_t r[13];     /* General purpose register r0-r12 */
	uint32_t sp;        /* Stack pointer r13 */
	uint32_t lr;        /* Link register r14 */
	uint32_t pc;        /* Program counter r15 */
	uint32_t cpsr;      /* Current program status register */
	uint32_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
};
typedef struct arm_saved_state32 arm_saved_state32_t;

/* 32-bit saved state preceded by an identifying tag word. */
struct arm_saved_state32_tagged {
	uint32_t                 tag;
	struct arm_saved_state32 state;
};
typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t;

#define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	        (sizeof(arm_saved_state32_t)/sizeof(unsigned int)))

/* Kernel-internal saved register state for a 64-bit (AArch64) thread. */
struct arm_saved_state64 {
	uint64_t x[29];     /* General purpose registers x0-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	uint64_t pc;        /* Program counter */
	uint32_t cpsr;      /* Current program status register */
	uint32_t reserved;  /* Reserved padding */
	uint64_t far;       /* Virtual fault address */
	uint64_t esr;       /* Exception syndrome register */
#if HAS_APPLE_PAC
	uint64_t jophash;   /* Integrity signature over the state; maintained by
	                     * ml_sign_thread_state()/ml_check_signed_state() */
#endif /* HAS_APPLE_PAC */
};
typedef struct arm_saved_state64 arm_saved_state64_t;

#define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state64_t)/sizeof(unsigned int)))

/* 64-bit saved state preceded by an identifying tag word. */
struct arm_saved_state64_tagged {
	uint32_t                 tag;
	struct arm_saved_state64 state;
};
typedef struct arm_saved_state64_tagged arm_saved_state64_tagged_t;

/*
 * Discriminated union of the two saved-state layouts; ash.flavor selects
 * ss_32 (ARM_SAVED_STATE32) or ss_64 (ARM_SAVED_STATE64).
 */
struct arm_saved_state {
	arm_state_hdr_t ash;
	union {
		struct arm_saved_state32 ss_32;
		struct arm_saved_state64 ss_64;
	} uss;
} __attribute__((aligned(16)));
/* Shorthand for the union members */
#define ss_32 uss.ss_32
#define ss_64 uss.ss_64

typedef struct arm_saved_state arm_saved_state_t;
457 
/*
 * Reduced state saved across an in-kernel context switch: only the
 * callee-saved GPRs (x19-x28 per AAPCS64) plus a few per-thread status bits.
 */
struct arm_kernel_saved_state {
	uint64_t x[10];     /* General purpose registers x19-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	/* Some things here we DO need to preserve */
	uint8_t pc_was_in_userspace; /* presumably: interrupted pc was a user address — confirm at save site */
	uint8_t ssbs;       /* saved SSBS bit (named after PSTATE.SSBS — confirm semantics at save/restore sites) */
	uint8_t dit;        /* saved DIT bit (named after PSTATE.DIT) */
	uint8_t uao;        /* saved UAO bit (named after PSTATE.UAO) */
} __attribute__((aligned(16)));

typedef struct arm_kernel_saved_state arm_kernel_saved_state_t;

/* Panic helpers invoked when a saved CPSR fails validation (see the CPSR
 * verification/mutation helpers below). Both never return. */
extern void ml_panic_on_invalid_old_cpsr(const arm_saved_state_t *) __attribute__((noreturn));

extern void ml_panic_on_invalid_new_cpsr(const arm_saved_state_t *, uint32_t) __attribute__((noreturn));
475 
#if HAS_APPLE_PAC

#include <sys/cdefs.h>

/*
 * Used by MANIPULATE_SIGNED_THREAD_STATE(), potentially from C++ (IOKit) code.
 * Open-coded to prevent a circular dependency between mach/arm/thread_status.h
 * and osfmk/arm/machine_routines.h.
 */
__BEGIN_DECLS
extern uint64_t ml_pac_safe_interrupts_disable(void);
extern void ml_pac_safe_interrupts_restore(uint64_t);
__END_DECLS

/*
 * Methods used to sign and check thread state to detect corruptions of saved
 * thread state across exceptions and context switches.
 */
extern void ml_sign_thread_state(arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);

extern void ml_check_signed_state(const arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);

/* XXX: including stddef.h here breaks ctfmerge on some builds, so use __builtin_offsetof() instead of offsetof() */
#define ss64_offsetof(x) __builtin_offsetof(struct arm_saved_state, ss_64.x)
500 
/**
 * Verify the signed thread state in _iss, execute the assembly instructions
 * _instr, and re-sign the modified thread state.  Varargs specify additional
 * inputs.
 *
 * Interrupts are disabled for the whole sequence via
 * ml_pac_safe_interrupts_disable()/_restore(), and the kernel SP_EL1 stack is
 * selected (SPSel) while the check/sign helpers run.
 *
 * _instr may read or modify the thread state in the following registers:
 *
 * x0: _iss
 * x1: authed _iss->ss_64.pc
 * w2: authed _iss->ss_64.cpsr
 * x3: authed _iss->ss_64.lr
 * x4: authed _iss->ss_64.x16
 * x5: authed _iss->ss_64.x17
 * x6: scratch register
 * x7: scratch register
 * x8: scratch register
 *
 * If _instr makes no changes to the thread state, it may skip re-signing by
 * branching to label 0.
 */
#define MANIPULATE_SIGNED_THREAD_STATE(_iss, _instr, ...)                       \
	do {                                                                    \
	        uint64_t _intr = ml_pac_safe_interrupts_disable();              \
	        asm volatile (                                                  \
	                "mov	x9, lr"				"\n"            \
	                "mov	x0, %[iss]"			"\n"            \
	                "msr	SPSel, #1"			"\n"            \
	                "ldp	x4, x5, [x0, %[SS64_X16]]"	"\n"            \
	                "ldr	x7, [x0, %[SS64_PC]]"		"\n"            \
	                "ldr	w8, [x0, %[SS64_CPSR]]"		"\n"            \
	                "ldr	x3, [x0, %[SS64_LR]]"		"\n"            \
	                "mov	x1, x7"				"\n"            \
	                "mov	w2, w8"				"\n"            \
	                "bl	_ml_check_signed_state"		"\n"            \
	                "mov	x1, x7"				"\n"            \
	                "mov	w2, w8"				"\n"            \
	                _instr					"\n"            \
	                "bl	_ml_sign_thread_state"		"\n"            \
	                "0:"					"\n"            \
	                "msr	SPSel, #0"			"\n"            \
	                "mov	lr, x9"				"\n"            \
	                :                                                       \
	                : [iss]         "r"(_iss),                              \
	                  [SS64_X16]	"i"(ss64_offsetof(x[16])),              \
	                  [SS64_PC]	"i"(ss64_offsetof(pc)),                 \
	                  [SS64_CPSR]	"i"(ss64_offsetof(cpsr)),               \
	                  [SS64_LR]	"i"(ss64_offsetof(lr)),##__VA_ARGS__    \
	                : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", \
	                  "x9", "x16", "x17"                                    \
	        );                                                              \
	        ml_pac_safe_interrupts_restore(_intr);                          \
	} while (0)

/*
 * Assembly prologue for MANIPULATE_SIGNED_USER_THREAD_STATE(): calls
 * ml_panic_on_invalid_old_cpsr() (which does not return) unless the saved
 * CPSR's EL field equals EL0, i.e. the state really came from userspace.
 */
#define VERIFY_USER_THREAD_STATE_INSTR                                          \
	        "and	w6, w2, %[CPSR_EL_MASK]"		"\n"            \
	        "cmp	w6, %[CPSR_EL0]"			"\n"            \
	        "b.eq	1f"					"\n"            \
	        "bl	_ml_panic_on_invalid_old_cpsr"		"\n"            \
	        "brk	#0"					"\n"            \
	"1:"							"\n"

/* Extra asm inputs consumed by VERIFY_USER_THREAD_STATE_INSTR */
#define VERIFY_USER_THREAD_STATE_INPUTS                                         \
	[CPSR_EL_MASK]	"i"(PSR64_MODE_EL_MASK),                                \
	[CPSR_EL0]	"i"(PSR64_MODE_EL0)

/* As MANIPULATE_SIGNED_THREAD_STATE(), but first verifies the state is EL0
 * (user) state, panicking otherwise. */
#define MANIPULATE_SIGNED_USER_THREAD_STATE(_iss, _instr, ...)                  \
	MANIPULATE_SIGNED_THREAD_STATE(_iss,                                    \
	        VERIFY_USER_THREAD_STATE_INSTR                                  \
	        _instr,                                                         \
	        VERIFY_USER_THREAD_STATE_INPUTS, ##__VA_ARGS__)
571 
/*
 * Verify the signature of user thread state copied from src, then sign the
 * copy.  The inline asm sets x0 = dst before the macro's final
 * bl _ml_sign_thread_state, so src is checked and dst is (re)signed.
 * Panics if src is not EL0 state (via MANIPULATE_SIGNED_USER_THREAD_STATE).
 */
static inline void
check_and_sign_copied_user_thread_state(arm_saved_state_t *dst, const arm_saved_state_t *src)
{
	MANIPULATE_SIGNED_USER_THREAD_STATE(src,
	    "mov	x0, %[dst]",
	    [dst] "r"(dst)
	    );
}
#endif /* HAS_APPLE_PAC */
581 
582 static inline boolean_t
is_saved_state32(const arm_saved_state_t * iss)583 is_saved_state32(const arm_saved_state_t *iss)
584 {
585 	return iss->ash.flavor == ARM_SAVED_STATE32;
586 }
587 
588 static inline boolean_t
is_saved_state64(const arm_saved_state_t * iss)589 is_saved_state64(const arm_saved_state_t *iss)
590 {
591 	return iss->ash.flavor == ARM_SAVED_STATE64;
592 }
593 
594 static inline arm_saved_state32_t*
saved_state32(arm_saved_state_t * iss)595 saved_state32(arm_saved_state_t *iss)
596 {
597 	return &iss->ss_32;
598 }
599 
600 static inline const arm_saved_state32_t*
const_saved_state32(const arm_saved_state_t * iss)601 const_saved_state32(const arm_saved_state_t *iss)
602 {
603 	return &iss->ss_32;
604 }
605 
606 static inline arm_saved_state64_t*
saved_state64(arm_saved_state_t * iss)607 saved_state64(arm_saved_state_t *iss)
608 {
609 	return &iss->ss_64;
610 }
611 
612 static inline const arm_saved_state64_t*
const_saved_state64(const arm_saved_state_t * iss)613 const_saved_state64(const arm_saved_state_t *iss)
614 {
615 	return &iss->ss_64;
616 }
617 
618 static inline register_t
get_saved_state_pc(const arm_saved_state_t * iss)619 get_saved_state_pc(const arm_saved_state_t *iss)
620 {
621 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc);
622 }
623 
#if HAS_ADD_SAVED_STATE_PC
/*
 * Advance the saved pc by diff bytes.  For 64-bit state with PAC enabled, the
 * update runs inside MANIPULATE_SIGNED_THREAD_STATE() so the state signature
 * stays valid; diff is sign-extended (sxtw) before the add.
 */
static inline void
add_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_ADD_SAVED_STATE_PC */

/*
 * As add_saved_state_pc(), but for 64-bit PAC state additionally verifies the
 * state is EL0 (user) state, panicking otherwise — see
 * MANIPULATE_SIGNED_USER_THREAD_STATE().
 */
static inline void
add_user_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}

#if HAS_SET_SAVED_STATE_PC
/* Overwrite the saved pc, re-signing 64-bit state when PAC is enabled. */
static inline void
set_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_PC */

/*
 * As set_saved_state_pc(), but for 64-bit PAC state verifies the state is EL0
 * (user) state first, panicking otherwise.
 */
static inline void
set_user_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
703 
704 static inline register_t
get_saved_state_sp(const arm_saved_state_t * iss)705 get_saved_state_sp(const arm_saved_state_t *iss)
706 {
707 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp);
708 }
709 
710 static inline void
set_saved_state_sp(arm_saved_state_t * iss,register_t sp)711 set_saved_state_sp(arm_saved_state_t *iss, register_t sp)
712 {
713 	if (is_saved_state32(iss)) {
714 		saved_state32(iss)->sp = CAST_ASSERT_SAFE(uint32_t, sp);
715 	} else {
716 		saved_state64(iss)->sp = (uint64_t)sp;
717 	}
718 }
719 
720 static inline register_t
get_saved_state_lr(const arm_saved_state_t * iss)721 get_saved_state_lr(const arm_saved_state_t *iss)
722 {
723 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr);
724 }
725 
#if HAS_SET_SAVED_STATE_LR
/*
 * Overwrite the saved link register.  For 64-bit state with PAC enabled the
 * store goes through MANIPULATE_SIGNED_THREAD_STATE() (lr is part of the
 * signed state), which re-signs the state afterwards.
 */
static inline void
set_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_LR */

/*
 * As set_saved_state_lr(), but for 64-bit PAC state verifies the state is EL0
 * (user) state first, panicking otherwise — see
 * MANIPULATE_SIGNED_USER_THREAD_STATE().
 */
static inline void
set_user_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
763 
764 static inline register_t
get_saved_state_fp(const arm_saved_state_t * iss)765 get_saved_state_fp(const arm_saved_state_t *iss)
766 {
767 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp);
768 }
769 
770 static inline void
set_saved_state_fp(arm_saved_state_t * iss,register_t fp)771 set_saved_state_fp(arm_saved_state_t *iss, register_t fp)
772 {
773 	if (is_saved_state32(iss)) {
774 		saved_state32(iss)->r[7] = CAST_ASSERT_SAFE(uint32_t, fp);
775 	} else {
776 		saved_state64(iss)->fp = (uint64_t)fp;
777 	}
778 }
779 
780 static inline int
check_saved_state_reglimit(const arm_saved_state_t * iss,unsigned reg)781 check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg)
782 {
783 	return is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT);
784 }
785 
786 static inline register_t
get_saved_state_reg(const arm_saved_state_t * iss,unsigned reg)787 get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg)
788 {
789 	if (!check_saved_state_reglimit(iss, reg)) {
790 		return 0;
791 	}
792 
793 	return (register_t)(is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]));
794 }
795 
796 #if HAS_SET_SAVED_STATE_REG
797 static inline void
set_saved_state_reg(arm_saved_state_t * iss,unsigned reg,register_t value)798 set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
799 {
800 	if (!check_saved_state_reglimit(iss, reg)) {
801 		return;
802 	}
803 
804 	if (is_saved_state32(iss)) {
805 		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
806 	} else {
807 #if HAS_APPLE_PAC
808 		/* x16 and x17 are part of the jophash */
809 		if (reg == 16) {
810 			MANIPULATE_SIGNED_THREAD_STATE(iss,
811 			    "mov	x4, %[value]		\n"
812 			    "str	x4, [x0, %[SS64_X16]]	\n",
813 			    [value] "r"(value)
814 			    );
815 			return;
816 		} else if (reg == 17) {
817 			MANIPULATE_SIGNED_THREAD_STATE(iss,
818 			    "mov	x5, %[value]		\n"
819 			    "str	x5, [x0, %[SS64_X17]]	\n",
820 			    [value] "r"(value),
821 			    [SS64_X17] "i"(ss64_offsetof(x[17]))
822 			    );
823 			return;
824 		}
825 #endif
826 		saved_state64(iss)->x[reg] = (uint64_t)value;
827 	}
828 }
829 #endif /* HAS_SET_SAVED_STATE_REG */
830 
/*
 * Stores `value` into GPR `reg` of a user thread's saved state;
 * out-of-range register indices are silently ignored.  On PAC-enabled
 * hardware x16/x17 are covered by the user thread-state signature, so
 * they are rewritten under MANIPULATE_SIGNED_USER_THREAD_STATE to keep
 * the signature valid.
 */
static inline void
set_user_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			/*
			 * NOTE(review): %[SS64_X16] has no operand here —
			 * presumably supplied by the macro itself; confirm.
			 */
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x4, %[value]		\n"
			    "str	x4, [x0, %[SS64_X16]]	\n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x5, %[value]		\n"
			    "str	x5, [x0, %[SS64_X17]]	\n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
863 
864 
865 static inline uint32_t
get_saved_state_cpsr(const arm_saved_state_t * iss)866 get_saved_state_cpsr(const arm_saved_state_t *iss)
867 {
868 	return is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr;
869 }
870 
#if HAS_MASK_SAVED_STATE_CPSR
/*
 * Read-modify-write of the saved CPSR: ORs in `set_bits`, then clears
 * `clear_bits`.  No validation is performed on the result, so this may
 * only be used where a privileged PSR is legal; see
 * mask_user_saved_state_cpsr() for the user-validating variant.  On PAC
 * hardware the 64-bit CPSR is part of the thread-state signature and is
 * modified under MANIPULATE_SIGNED_THREAD_STATE.
 */
static inline void
mask_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr |= set_bits;
		saved_state32(iss)->cpsr &= ~clear_bits;
	} else {
#if HAS_APPLE_PAC
		/*
		 * NOTE(review): w2 appears to hold the current CPSR and
		 * %[SS64_CPSR] to be an operand supplied by the macro
		 * itself — confirm against the macro definition.
		 */
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]	\n"
		    "orr	w2, w2, w6, lsl #0	\n"
		    "mov	w6, %w[clear_bits]	\n"
		    "bic	w2, w2, w6, lsl #0	\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		saved_state64(iss)->cpsr |= set_bits;
		saved_state64(iss)->cpsr &= ~clear_bits;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_MASK_SAVED_STATE_CPSR */
896 
/*
 * Read-modify-write of a user thread's saved CPSR: ORs in `set_bits`,
 * clears `clear_bits`, and calls ml_panic_on_invalid_new_cpsr() if the
 * result is not a user-mode PSR — guarding against privilege escalation
 * through saved-state manipulation.
 */
static inline void
mask_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		uint32_t new_cpsr = saved_state32(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state32(iss)->cpsr = new_cpsr;
	} else {
#if HAS_APPLE_PAC
		/*
		 * Same EL0 check, performed in asm so the update happens
		 * inside the signed-state critical section: if the new
		 * CPSR's EL field is not EL0, branch to the panic routine
		 * (the brk #0 after it should be unreachable).
		 * NOTE(review): CPSR_EL_MASK/CPSR_EL0/SS64_CPSR operands are
		 * presumably supplied by the macro — confirm.
		 */
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]		\n"
		    "orr	w2, w2, w6, lsl #0		\n"
		    "mov	w6, %w[clear_bits]	\n"
		    "bic	w2, w2, w6, lsl #0		\n"
		    "and	w6, w2, %[CPSR_EL_MASK]		\n"
		    "cmp	w6, %[CPSR_EL0]			\n"
		    "b.eq	1f				\n"
		    "mov	w1, w2				\n"
		    "bl		_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0				\n"
		    "1:						\n"
		    "str	w2, [x0, %[SS64_CPSR]]		\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		uint32_t new_cpsr = saved_state64(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR64_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state64(iss)->cpsr = new_cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
937 
#if HAS_SET_SAVED_STATE_CPSR
/*
 * Overwrites the saved CPSR without validating the new value; callers
 * must ensure the PSR is legal for the target context.  See
 * set_user_saved_state_cpsr() for the variant that enforces a user-mode
 * PSR.  On PAC hardware the 64-bit CPSR is part of the thread-state
 * signature and is updated under MANIPULATE_SIGNED_THREAD_STATE.
 */
static inline void
set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		/* NOTE(review): %[SS64_CPSR] presumably supplied by the macro — confirm. */
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]		\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_CPSR */
957 
/*
 * Overwrites a user thread's saved CPSR, calling
 * ml_panic_on_invalid_new_cpsr() if the new value is not a user-mode
 * PSR — guarding against privilege escalation through saved-state
 * manipulation.
 */
static inline void
set_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		if (!PSR_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		/*
		 * EL0 check performed in asm so the store happens inside the
		 * signed-state critical section; non-EL0 values branch to the
		 * panic routine (brk #0 should be unreachable).
		 * NOTE(review): CPSR_EL_MASK/CPSR_EL0/SS64_CPSR operands are
		 * presumably supplied by the macro — confirm.
		 */
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]			\n"
		    "and	w6, w2, %[CPSR_EL_MASK]		\n"
		    "cmp	w6, %[CPSR_EL0]			\n"
		    "b.eq	1f				\n"
		    "mov	w1, w2				\n"
		    "bl		_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0				\n"
		    "1:						\n"
		    "str	w2, [x0, %[SS64_CPSR]]		\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		if (!PSR64_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
988 
989 static inline register_t
get_saved_state_far(const arm_saved_state_t * iss)990 get_saved_state_far(const arm_saved_state_t *iss)
991 {
992 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far);
993 }
994 
995 static inline void
set_saved_state_far(arm_saved_state_t * iss,register_t far)996 set_saved_state_far(arm_saved_state_t *iss, register_t far)
997 {
998 	if (is_saved_state32(iss)) {
999 		saved_state32(iss)->far = CAST_ASSERT_SAFE(uint32_t, far);
1000 	} else {
1001 		saved_state64(iss)->far = (uint64_t)far;
1002 	}
1003 }
1004 
1005 static inline uint64_t
get_saved_state_esr(const arm_saved_state_t * iss)1006 get_saved_state_esr(const arm_saved_state_t *iss)
1007 {
1008 	return is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr;
1009 }
1010 
1011 static inline void
set_saved_state_esr(arm_saved_state_t * iss,uint64_t esr)1012 set_saved_state_esr(arm_saved_state_t *iss, uint64_t esr)
1013 {
1014 	if (is_saved_state32(iss)) {
1015 		assert(esr < (uint64_t) (uint32_t) -1);
1016 		saved_state32(iss)->esr = (uint32_t) esr;
1017 	} else {
1018 		saved_state64(iss)->esr = esr;
1019 	}
1020 }
1021 
1022 extern void panic_unimplemented(void);
1023 
1024 /**
1025  * Extracts the SVC (Supervisor Call) number from the appropriate GPR (General
1026  * Purpose Register).
1027  *
1028  * @param[in] iss the 32-bit or 64-bit ARM saved state (i.e. trap frame).
1029  *
1030  * @return The SVC number.
1031  */
1032 static inline int
get_saved_state_svc_number(const arm_saved_state_t * iss)1033 get_saved_state_svc_number(const arm_saved_state_t *iss)
1034 {
1035 	return is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]; /* Only first word counts here */
1036 }
1037 
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t;

/*
 * Reference-counted aggregate of the 32-bit and 64-bit debug-register
 * state; the flavor in `dsh` selects which union member is active.
 */
struct arm_debug_aggregate_state {
	arm_state_hdr_t dsh;    /* flavor/count header selecting ds32 vs ds64 */
	union {
		arm_debug_state32_t ds32;
		arm_debug_state64_t ds64;
	} uds;
	os_refcnt_t     ref;    /* shared-ownership reference count */
} __attribute__((aligned(16)));

typedef struct arm_debug_aggregate_state arm_debug_state_t;

#define ARM_LEGACY_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_legacy_debug_state_t)/sizeof(uint32_t)))
1053 
/*
 * NEON context
 */
typedef __uint128_t uint128_t;
typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2)));
typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4)));

/*
 * AArch32 NEON register file: the same storage viewed as 16 128-bit Q,
 * 32 64-bit D, or 32 32-bit S registers, plus the FP status/control words.
 */
struct arm_neon_saved_state32 {
	union {
		uint128_t q[16];
		uint64_t  d[32];
		uint32_t  s[32];
	} v;
	uint32_t fpsr;
	uint32_t fpcr;
};
typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t;

#define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int)))
1074 
/*
 * AArch64 SIMD/FP register file: 32 128-bit V registers (as q/d/s
 * views), plus the FP status/control words.
 */
struct arm_neon_saved_state64 {
	union {
		uint128_t  q[32];
		uint64x2_t d[32];
		uint32x4_t s[32];
	} v;
	uint32_t fpsr;
	uint32_t fpcr;
};
typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t;

#define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int)))
1088 
/*
 * Flavor-tagged union of the 32-bit and 64-bit NEON states; `nsh`
 * selects which member is active.
 */
struct arm_neon_saved_state {
	arm_state_hdr_t nsh;
	union {
		struct arm_neon_saved_state32 ns_32;
		struct arm_neon_saved_state64 ns_64;
	} uns;
};
typedef struct arm_neon_saved_state arm_neon_saved_state_t;
/* Convenience accessors for the union members. */
#define ns_32 uns.ns_32
#define ns_64 uns.ns_64

/*
 * Minimal FP state saved for kernel threads: d registers d[0..7] and
 * FPCR only (no fpsr, no full V-file).
 */
struct arm_kernel_neon_saved_state {
	uint64_t d[8];
	uint32_t fpcr;
};
typedef struct arm_kernel_neon_saved_state arm_kernel_neon_saved_state_t;
1105 
1106 static inline boolean_t
is_neon_saved_state32(const arm_neon_saved_state_t * state)1107 is_neon_saved_state32(const arm_neon_saved_state_t *state)
1108 {
1109 	return state->nsh.flavor == ARM_NEON_SAVED_STATE32;
1110 }
1111 
1112 static inline boolean_t
is_neon_saved_state64(const arm_neon_saved_state_t * state)1113 is_neon_saved_state64(const arm_neon_saved_state_t *state)
1114 {
1115 	return state->nsh.flavor == ARM_NEON_SAVED_STATE64;
1116 }
1117 
1118 static inline arm_neon_saved_state32_t *
neon_state32(arm_neon_saved_state_t * state)1119 neon_state32(arm_neon_saved_state_t *state)
1120 {
1121 	return &state->ns_32;
1122 }
1123 
1124 static inline arm_neon_saved_state64_t *
neon_state64(arm_neon_saved_state_t * state)1125 neon_state64(arm_neon_saved_state_t *state)
1126 {
1127 	return &state->ns_64;
1128 }
1129 
1130 
#if HAS_ARM_FEAT_SME


struct arm_sme_saved_state;
typedef struct arm_sme_saved_state arm_sme_saved_state_t;

#if !__has_ptrcheck
/*
 * SME register context.  ZT0 is fixed-size; the Z, P, and ZA registers
 * scale with the streaming vector length (SVL) and live in the flexible
 * trailing array, laid out as Z, then P, then ZA (see the
 * arm_sme_z/p/za accessors).
 */
typedef struct {
	uint8_t                 zt0[64];
	uint8_t                 __z_p_za[];     /* variable-length Z/P/ZA payload */
} arm_sme_context_t;

struct arm_sme_saved_state {
	arm_state_hdr_t         hdr;
	uint64_t                svcr;   /* streaming-mode control (SVCR) value */
	uint16_t                svl_b;  /* streaming vector length, in bytes */
	arm_sme_context_t       context;
};
1149 
/* Byte size of the 32 Z registers at a streaming VL of `svl_b` bytes. */
static inline size_t
arm_sme_z_size(uint16_t svl_b)
{
	return (size_t)svl_b * 32;
}
1155 
/* Byte size of the predicate (P) registers at a streaming VL of `svl_b` bytes. */
static inline size_t
arm_sme_p_size(uint16_t svl_b)
{
	return (size_t)svl_b * 2;
}
1161 
/*
 * Byte size of the ZA array storage at a streaming VL of `svl_b` bytes
 * (ZA is an svl_b x svl_b byte matrix).
 *
 * Widen before multiplying: uint16_t promotes to int, so the original
 * `svl_b * svl_b` is signed-overflow UB once svl_b >= 46341
 * (product > INT_MAX).  Casting one operand to size_t keeps the whole
 * product in unsigned arithmetic for any uint16_t input.
 */
static inline size_t
arm_sme_za_size(uint16_t svl_b)
{
	return (size_t)svl_b * svl_b;
}
1167 
1168 static inline mach_msg_type_number_t
arm_sme_saved_state_count(uint16_t svl_b)1169 arm_sme_saved_state_count(uint16_t svl_b)
1170 {
1171 	assert(svl_b % 16 == 0);
1172 	size_t size = sizeof(arm_sme_saved_state_t) +
1173 	    arm_sme_z_size(svl_b) +
1174 	    arm_sme_p_size(svl_b) +
1175 	    arm_sme_za_size(svl_b);
1176 	return (mach_msg_type_number_t)(size / sizeof(unsigned int));
1177 }
1178 
1179 static inline uint8_t *
arm_sme_z(arm_sme_context_t * ss)1180 arm_sme_z(arm_sme_context_t *ss)
1181 {
1182 	return ss->__z_p_za;
1183 }
1184 
1185 static inline const uint8_t *
const_arm_sme_z(const arm_sme_context_t * ss)1186 const_arm_sme_z(const arm_sme_context_t *ss)
1187 {
1188 	return ss->__z_p_za;
1189 }
1190 
1191 static inline uint8_t *
arm_sme_p(arm_sme_context_t * ss,uint16_t svl_b)1192 arm_sme_p(arm_sme_context_t *ss, uint16_t svl_b)
1193 {
1194 	return ss->__z_p_za + arm_sme_z_size(svl_b);
1195 }
1196 
1197 static inline const uint8_t *
const_arm_sme_p(const arm_sme_context_t * ss,uint16_t svl_b)1198 const_arm_sme_p(const arm_sme_context_t *ss, uint16_t svl_b)
1199 {
1200 	return ss->__z_p_za + arm_sme_z_size(svl_b);
1201 }
1202 
1203 static inline uint8_t *
arm_sme_za(arm_sme_context_t * ss,uint16_t svl_b)1204 arm_sme_za(arm_sme_context_t *ss, uint16_t svl_b)
1205 {
1206 	return ss->__z_p_za + arm_sme_z_size(svl_b) + arm_sme_p_size(svl_b);
1207 }
1208 
1209 static inline const uint8_t *
const_arm_sme_za(const arm_sme_context_t * ss,uint16_t svl_b)1210 const_arm_sme_za(const arm_sme_context_t *ss, uint16_t svl_b)
1211 {
1212 	return ss->__z_p_za + arm_sme_z_size(svl_b) + arm_sme_p_size(svl_b);
1213 }
1214 
1215 #endif /* !__has_ptrcheck */
1216 #endif /* HAS_ARM_FEAT_SME */
1217 
1218 /*
1219  * Aggregated context
1220  */
1221 
/* Full thread context: integer saved state plus NEON state. */
struct arm_context {
	struct arm_saved_state ss;
	struct arm_neon_saved_state ns;
};
typedef struct arm_context arm_context_t;

/*
 * Context for kernel threads; pairs the kernel saved state with the
 * reduced kernel NEON state (d[0..7] + fpcr only — see
 * arm_kernel_neon_saved_state above).
 */
struct arm_kernel_context {
	struct arm_kernel_saved_state ss;
	struct arm_kernel_neon_saved_state ns;
};
typedef struct arm_kernel_context arm_kernel_context_t;
1233 
1234 extern void saved_state_to_thread_state64(const arm_saved_state_t*, arm_thread_state64_t*);
1235 extern void thread_state64_to_saved_state(const arm_thread_state64_t*, arm_saved_state_t*);
1236 
1237 #else /* defined(__arm64__) */
1238 #error Unknown arch
1239 #endif /* defined(__arm64__) */
1240 
1241 extern void saved_state_to_thread_state32(const arm_saved_state_t*, arm_thread_state32_t*);
1242 extern void thread_state32_to_saved_state(const arm_thread_state32_t*, arm_saved_state_t*);
1243 
1244 #endif /* XNU_KERNEL_PRIVATE */
1245 
1246 #endif /* defined (__arm__) || defined (__arm64__) */
1247 
1248 #endif /* _ARM_THREAD_STATUS_H_ */
1249