xref: /xnu-10002.81.5/osfmk/mach/arm/thread_status.h (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * FILE_ID: thread_status.h
30  */
31 
32 
33 #ifndef _ARM_THREAD_STATUS_H_
34 #define _ARM_THREAD_STATUS_H_
35 
36 #if defined (__arm__) || defined (__arm64__)
37 
38 #include <mach/machine/_structs.h>
39 #include <mach/machine/thread_state.h>
40 #include <mach/message.h>
41 #include <mach/vm_types.h>
42 
43 #ifdef XNU_KERNEL_PRIVATE
44 #include <os/refcnt.h>
45 #endif
46 
47 /*
48  *    Support for determining the state of a thread
49  */
50 
51 
52 /*
53  *  Flavors
54  */
55 
/*
 * Thread-state flavor identifiers shared between user space and the
 * kernel.  These values are ABI: never renumber an existing flavor.
 */
#define ARM_THREAD_STATE         1
#define ARM_UNIFIED_THREAD_STATE ARM_THREAD_STATE
#define ARM_VFP_STATE            2
#define ARM_EXCEPTION_STATE      3
#define ARM_DEBUG_STATE          4 /* pre-armv8 */
#define THREAD_STATE_NONE        5
#define ARM_THREAD_STATE64       6
#define ARM_EXCEPTION_STATE64    7
//      ARM_THREAD_STATE_LAST    8 /* legacy */
#define ARM_THREAD_STATE32       9

#ifdef XNU_KERNEL_PRIVATE
#define X86_THREAD_STATE_NONE    13 /* i386/thread_status.h THREAD_STATE_NONE */
#endif /* XNU_KERNEL_PRIVATE */

/* API */
#define ARM_DEBUG_STATE32        14
#define ARM_DEBUG_STATE64        15
#define ARM_NEON_STATE           16
#define ARM_NEON_STATE64         17
#define ARM_CPMU_STATE64         18

#ifdef XNU_KERNEL_PRIVATE
/* For kernel use: flavors of the in-kernel saved-state representation. */
#define ARM_SAVED_STATE32        20
#define ARM_SAVED_STATE64        21
#define ARM_NEON_SAVED_STATE32   22
#define ARM_NEON_SAVED_STATE64   23
#endif /* XNU_KERNEL_PRIVATE */

#define ARM_PAGEIN_STATE         27
88 
#ifndef ARM_STATE_FLAVOR_IS_OTHER_VALID
/* Platforms may pre-define this to accept additional, platform-private flavors. */
#define ARM_STATE_FLAVOR_IS_OTHER_VALID(_flavor_) 0
#endif

/*
 * True for flavors whose setters rewrite core integer registers.
 * Macro parameter is fully parenthesized so expression arguments
 * (e.g. `a | b`) parse correctly.
 */
#define FLAVOR_MODIFIES_CORE_CPU_REGISTERS(x) \
(((x) == ARM_THREAD_STATE) ||     \
 ((x) == ARM_THREAD_STATE32) ||   \
 ((x) == ARM_THREAD_STATE64))

/* True for any flavor this header exports to the thread-state interfaces. */
#define VALID_THREAD_STATE_FLAVOR(x) \
	(((x) == ARM_THREAD_STATE) ||           \
	 ((x) == ARM_VFP_STATE) ||              \
	 ((x) == ARM_EXCEPTION_STATE) ||        \
	 ((x) == ARM_DEBUG_STATE) ||            \
	 ((x) == THREAD_STATE_NONE) ||          \
	 ((x) == ARM_THREAD_STATE32) ||         \
	 ((x) == ARM_THREAD_STATE64) ||         \
	 ((x) == ARM_EXCEPTION_STATE64) ||      \
	 ((x) == ARM_NEON_STATE) ||             \
	 ((x) == ARM_NEON_STATE64) ||           \
	 ((x) == ARM_DEBUG_STATE32) ||          \
	 ((x) == ARM_DEBUG_STATE64) ||          \
	 ((x) == ARM_PAGEIN_STATE) ||           \
	 (ARM_STATE_FLAVOR_IS_OTHER_VALID(x)))
113 
/*
 * Common header prefixing every unified/saved thread-state blob: which
 * flavor the payload carries and its size in 32-bit words.
 */
typedef struct arm_state_hdr {
	uint32_t flavor; /* one of the ARM_*_STATE* flavor constants */
	uint32_t count;  /* payload size in 32-bit words */
} arm_state_hdr_t;
119 
/* Exported GPR state layouts; the unqualified name aliases the 32-bit layout. */
typedef _STRUCT_ARM_THREAD_STATE   arm_thread_state_t;
typedef _STRUCT_ARM_THREAD_STATE   arm_thread_state32_t;
typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t;
123 
#if !defined(KERNEL)
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__)

/*
 * Accessor macros for arm_thread_state64_t pointer fields.  User code
 * should go through these wrappers rather than touching the raw fields;
 * the __darwin_* implementations handle pointer-authentication where
 * applicable (see arm_thread_state64_ptrauth_strip below).
 */

/* Return pc field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_pc(ts) \
	        __darwin_arm_thread_state64_get_pc(ts)
/* Return pc field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_pc() macro in that case. */
#define arm_thread_state64_get_pc_fptr(ts) \
	        __darwin_arm_thread_state64_get_pc_fptr(ts)
/* Set pc field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_pc_fptr(ts, fptr) \
	        __darwin_arm_thread_state64_set_pc_fptr(ts, fptr)
/* Return lr field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_lr(ts) \
	        __darwin_arm_thread_state64_get_lr(ts)
/* Return lr field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_lr() macro in that case. */
#define arm_thread_state64_get_lr_fptr(ts) \
	        __darwin_arm_thread_state64_get_lr_fptr(ts)
/* Set lr field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_lr_fptr(ts, fptr) \
	        __darwin_arm_thread_state64_set_lr_fptr(ts, fptr)
/* Return sp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_sp(ts) \
	        __darwin_arm_thread_state64_get_sp(ts)
/* Set sp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_sp(ts, ptr) \
	        __darwin_arm_thread_state64_set_sp(ts, ptr)
/* Return fp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_fp(ts) \
	        __darwin_arm_thread_state64_get_fp(ts)
/* Set fp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_fp(ts, ptr) \
	        __darwin_arm_thread_state64_set_fp(ts, ptr)
/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */
#define arm_thread_state64_ptrauth_strip(ts) \
	        __darwin_arm_thread_state64_ptrauth_strip(ts)

#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */
#endif /* !defined(KERNEL) */
169 
170 struct arm_unified_thread_state {
171 	arm_state_hdr_t ash;
172 	union {
173 		arm_thread_state32_t ts_32;
174 		arm_thread_state64_t ts_64;
175 	} uts;
176 };
177 #define ts_32 uts.ts_32
178 #define ts_64 uts.ts_64
179 typedef struct arm_unified_thread_state arm_unified_thread_state_t;
180 
181 #define ARM_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
182 	(sizeof (arm_thread_state_t)/sizeof(uint32_t)))
183 #define ARM_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \
184 	(sizeof (arm_thread_state32_t)/sizeof(uint32_t)))
185 #define ARM_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \
186 	(sizeof (arm_thread_state64_t)/sizeof(uint32_t)))
187 #define ARM_UNIFIED_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
188 	(sizeof (arm_unified_thread_state_t)/sizeof(uint32_t)))
189 
190 
/* Floating-point / SIMD register file layouts. */
typedef _STRUCT_ARM_VFP_STATE         arm_vfp_state_t;
typedef _STRUCT_ARM_NEON_STATE        arm_neon_state_t;
typedef _STRUCT_ARM_NEON_STATE        arm_neon_state32_t;
typedef _STRUCT_ARM_NEON_STATE64      arm_neon_state64_t;


/* Exception state layouts; the unqualified name aliases the 32-bit layout. */
typedef _STRUCT_ARM_EXCEPTION_STATE   arm_exception_state_t;
typedef _STRUCT_ARM_EXCEPTION_STATE   arm_exception_state32_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t;

/* Hardware debug register layouts (armv8). */
typedef _STRUCT_ARM_DEBUG_STATE32     arm_debug_state32_t;
typedef _STRUCT_ARM_DEBUG_STATE64     arm_debug_state64_t;

typedef _STRUCT_ARM_PAGEIN_STATE      arm_pagein_state_t;
205 
#if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
/* See below for ARM64 kernel structure definition for arm_debug_state. */
#else /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
/*
 * Otherwise not ARM64 kernel and we must preserve legacy ARM definitions of
 * arm_debug_state for binary compatibility of userland consumers of this file.
 */
#if defined(__arm__)
typedef _STRUCT_ARM_DEBUG_STATE        arm_debug_state_t;
#elif defined(__arm64__)
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t;
#else /* defined(__arm__) */
#error Undefined architecture
#endif /* defined(__arm__) */
#endif /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
221 
/* State sizes in 32-bit words, as used by the mach_msg interfaces. */
#define ARM_VFP_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfp_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state32_t)/sizeof(uint32_t)))

#define ARM_PAGEIN_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_pagein_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state64_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state64_t)/sizeof(uint32_t)))
248 
/* Default flavor/count used by machine-independent Mach code. */
#define MACHINE_THREAD_STATE       ARM_THREAD_STATE
#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT


/*
 * Largest state on this machine:
 */
#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX
257 
258 #ifdef XNU_KERNEL_PRIVATE
259 
/*
 * Feature gates for the privileged saved-state mutators defined below.
 * Each mutator is compiled in only when some kernel subsystem needs it.
 */
#if CONFIG_DTRACE
#define HAS_ADD_SAVED_STATE_PC          1
#define HAS_SET_SAVED_STATE_PC          1
#define HAS_SET_SAVED_STATE_LR          1
#define HAS_SET_SAVED_STATE_REG         1
#define HAS_MASK_SAVED_STATE_CPSR       1
#endif /* CONFIG_DTRACE */

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
#define HAS_SET_SAVED_STATE_CPSR        1
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

#if CONFIG_XNUPOST
#define HAS_ADD_SAVED_STATE_PC          1
#define HAS_SET_SAVED_STATE_PC          1
#endif /* CONFIG_XNUPOST */

#if DEBUG || DEVELOPMENT
#define HAS_ADD_SAVED_STATE_PC          1
#endif
280 
281 
282 static inline boolean_t
is_thread_state32(const arm_unified_thread_state_t * its)283 is_thread_state32(const arm_unified_thread_state_t *its)
284 {
285 	return its->ash.flavor == ARM_THREAD_STATE32;
286 }
287 
288 static inline boolean_t
is_thread_state64(const arm_unified_thread_state_t * its)289 is_thread_state64(const arm_unified_thread_state_t *its)
290 {
291 	return its->ash.flavor == ARM_THREAD_STATE64;
292 }
293 
294 static inline arm_thread_state32_t*
thread_state32(arm_unified_thread_state_t * its)295 thread_state32(arm_unified_thread_state_t *its)
296 {
297 	return &its->ts_32;
298 }
299 
300 static inline arm_thread_state64_t*
thread_state64(arm_unified_thread_state_t * its)301 thread_state64(arm_unified_thread_state_t *its)
302 {
303 	return &its->ts_64;
304 }
305 
306 static inline const arm_thread_state32_t*
const_thread_state32(const arm_unified_thread_state_t * its)307 const_thread_state32(const arm_unified_thread_state_t *its)
308 {
309 	return &its->ts_32;
310 }
311 
312 static inline const arm_thread_state64_t*
const_thread_state64(const arm_unified_thread_state_t * its)313 const_thread_state64(const arm_unified_thread_state_t *its)
314 {
315 	return &its->ts_64;
316 }
317 
318 #if defined(__arm64__)
319 
320 #include <kern/assert.h>
321 #include <arm64/proc_reg.h>
322 #define CAST_ASSERT_SAFE(type, val) (assert((val) == ((type)(val))), (type)(val))
323 
324 /*
325  * GPR context
326  */
327 
/* AArch32 integer register context captured by the kernel on exception entry. */
typedef struct arm_saved_state32 {
	uint32_t r[13];     /* General purpose register r0-r12 */
	uint32_t sp;        /* Stack pointer r13 */
	uint32_t lr;        /* Link register r14 */
	uint32_t pc;        /* Program counter r15 */
	uint32_t cpsr;      /* Current program status register */
	uint32_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
} arm_saved_state32_t;
339 
340 struct arm_saved_state32_tagged {
341 	uint32_t                 tag;
342 	struct arm_saved_state32 state;
343 };
344 typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t;
345 
346 #define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
347 	        (sizeof(arm_saved_state32_t)/sizeof(unsigned int)))
348 
/* AArch64 integer register context captured by the kernel on exception entry. */
typedef struct arm_saved_state64 {
	uint64_t x[29];     /* General purpose registers x0-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	uint64_t pc;        /* Program counter */
	uint32_t cpsr;      /* Current program status register */
	uint32_t reserved;  /* Reserved padding */
	uint64_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
#if HAS_APPLE_PAC
	uint64_t jophash;   /* Signature over the PAC-protected fields */
#endif /* HAS_APPLE_PAC */
} arm_saved_state64_t;

/* Size of the 64-bit saved state in 32-bit words. */
#define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state64_t)/sizeof(unsigned int)))
368 
369 struct arm_saved_state64_tagged {
370 	uint32_t                 tag;
371 	struct arm_saved_state64 state;
372 };
373 typedef struct arm_saved_state64_tagged arm_saved_state64_tagged_t;
374 
375 struct arm_saved_state {
376 	arm_state_hdr_t ash;
377 	union {
378 		struct arm_saved_state32 ss_32;
379 		struct arm_saved_state64 ss_64;
380 	} uss;
381 } __attribute__((aligned(16)));
382 #define ss_32 uss.ss_32
383 #define ss_64 uss.ss_64
384 
385 typedef struct arm_saved_state arm_saved_state_t;
386 
/*
 * Minimal context saved for kernel-to-kernel switches: the AAPCS64
 * callee-saved registers plus a few per-thread status bits.
 */
typedef struct arm_kernel_saved_state {
	uint64_t x[10];     /* General purpose registers x19-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	/* Some things here we DO need to preserve */
	uint8_t pc_was_in_userspace; /* nonzero if the interrupted PC was EL0 */
	uint8_t ssbs;       /* saved PSTATE.SSBS bit */
	uint8_t dit;        /* saved PSTATE.DIT bit */
	uint8_t uao;        /* saved PSTATE.UAO bit */
} __attribute__((aligned(16))) arm_kernel_saved_state_t;
400 
/* Panic paths taken when a saved-state CPSR fails user-mode validation. */
extern void ml_panic_on_invalid_old_cpsr(const arm_saved_state_t *) __attribute__((noreturn));

extern void ml_panic_on_invalid_new_cpsr(const arm_saved_state_t *, uint32_t) __attribute__((noreturn));
404 
405 #if HAS_APPLE_PAC
406 
407 #include <sys/cdefs.h>
408 
/*
 * Used by MANIPULATE_SIGNED_THREAD_STATE(), potentially from C++ (IOKit) code.
 * Open-coded to prevent a circular dependency between mach/arm/thread_status.h
 * and osfmk/arm/machine_routines.h.
 */
__BEGIN_DECLS
/* Returns the previous interrupt state, to be passed to the restore call. */
extern uint64_t ml_pac_safe_interrupts_disable(void);
extern void ml_pac_safe_interrupts_restore(uint64_t);
__END_DECLS
418 
/*
 * Methods used to sign and check thread state to detect corruptions of saved
 * thread state across exceptions and context switches.
 */
extern void ml_sign_thread_state(arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);

extern void ml_check_signed_state(const arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);
426 
/* XXX: including stddef.h here breaks ctfmerge on some builds, so use __builtin_offsetof() instead of offsetof() */
#define ss64_offsetof(x) __builtin_offsetof(struct arm_saved_state, ss_64.x)
429 
/**
 * Verify the signed thread state in _iss, execute the assembly instructions
 * _instr, and re-sign the modified thread state.  Varargs specify additional
 * inputs.
 *
 * _instr may read or modify the thread state in the following registers:
 *
 * x0: _iss
 * x1: authed _iss->ss_64.pc
 * w2: authed _iss->ss_64.cpsr
 * x3: authed _iss->ss_64.lr
 * x4: authed _iss->ss_64.x16
 * x5: authed _iss->ss_64.x17
 * x6: scratch register
 * x7: scratch register
 *
 * If _instr makes no changes to the thread state, it may skip re-signing by
 * branching to label 0.
 *
 * Interrupts are disabled for the duration so the state cannot change
 * between the check and the re-sign; lr is preserved in x9 across the
 * ml_check_signed_state/ml_sign_thread_state calls, and SPSel is switched
 * to SP_EL1 (#1) around them.
 */
#define MANIPULATE_SIGNED_THREAD_STATE(_iss, _instr, ...)                       \
	do {                                                                    \
	        uint64_t _intr = ml_pac_safe_interrupts_disable();              \
	        asm volatile (                                                  \
	                "mov	x9, lr"				"\n"            \
	                "mov	x0, %[iss]"			"\n"            \
	                "msr	SPSel, #1"			"\n"            \
	                "ldp	x4, x5, [x0, %[SS64_X16]]"	"\n"            \
	                "ldr	x7, [x0, %[SS64_PC]]"		"\n"            \
	                "ldr	w8, [x0, %[SS64_CPSR]]"		"\n"            \
	                "ldr	x3, [x0, %[SS64_LR]]"		"\n"            \
	                "mov	x1, x7"				"\n"            \
	                "mov	w2, w8"				"\n"            \
	                "bl	_ml_check_signed_state"		"\n"            \
	                "mov	x1, x7"				"\n"            \
	                "mov	w2, w8"				"\n"            \
	                _instr					"\n"            \
	                "bl	_ml_sign_thread_state"		"\n"            \
	                "0:"					"\n"            \
	                "msr	SPSel, #0"			"\n"            \
	                "mov	lr, x9"				"\n"            \
	                :                                                       \
	                : [iss]         "r"(_iss),                              \
	                  [SS64_X16]	"i"(ss64_offsetof(x[16])),              \
	                  [SS64_PC]	"i"(ss64_offsetof(pc)),                 \
	                  [SS64_CPSR]	"i"(ss64_offsetof(cpsr)),               \
	                  [SS64_LR]	"i"(ss64_offsetof(lr)),##__VA_ARGS__    \
	                : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", \
	                  "x9"                                                  \
	        );                                                              \
	        ml_pac_safe_interrupts_restore(_intr);                          \
	} while (0)
481 
/*
 * Assembly prologue for MANIPULATE_SIGNED_USER_THREAD_STATE: traps to
 * ml_panic_on_invalid_old_cpsr unless the authed CPSR (w2) encodes EL0.
 */
#define VERIFY_USER_THREAD_STATE_INSTR                                          \
	        "and	w6, w2, %[CPSR_EL_MASK]"		"\n"            \
	        "cmp	w6, %[CPSR_EL0]"			"\n"            \
	        "b.eq	1f"					"\n"            \
	        "bl	_ml_panic_on_invalid_old_cpsr"		"\n"            \
	        "brk	#0"					"\n"            \
	"1:"							"\n"

/* Extra asm inputs consumed by VERIFY_USER_THREAD_STATE_INSTR. */
#define VERIFY_USER_THREAD_STATE_INPUTS                                         \
	[CPSR_EL_MASK]	"i"(PSR64_MODE_EL_MASK),                                \
	[CPSR_EL0]	"i"(PSR64_MODE_EL0)
493 
/*
 * MANIPULATE_SIGNED_THREAD_STATE, but first verifies that the saved CPSR
 * encodes EL0 (user mode), panicking otherwise.
 */
#define MANIPULATE_SIGNED_USER_THREAD_STATE(_iss, _instr, ...)                  \
	MANIPULATE_SIGNED_THREAD_STATE(_iss,                                    \
	        VERIFY_USER_THREAD_STATE_INSTR                                  \
	        _instr,                                                         \
	        VERIFY_USER_THREAD_STATE_INPUTS, ##__VA_ARGS__)
499 
/*
 * Verify the signature on the user saved state 'src' that was copied to
 * 'dst', then sign the copy.  The injected instruction moves 'dst' into
 * x0 after the check so ml_sign_thread_state signs the destination.
 */
static inline void
check_and_sign_copied_user_thread_state(arm_saved_state_t *dst, const arm_saved_state_t *src)
{
	MANIPULATE_SIGNED_USER_THREAD_STATE(src,
	    "mov	x0, %[dst]",
	    [dst] "r"(dst)
	    );
}
508 #endif /* HAS_APPLE_PAC */
509 
510 static inline boolean_t
is_saved_state32(const arm_saved_state_t * iss)511 is_saved_state32(const arm_saved_state_t *iss)
512 {
513 	return iss->ash.flavor == ARM_SAVED_STATE32;
514 }
515 
516 static inline boolean_t
is_saved_state64(const arm_saved_state_t * iss)517 is_saved_state64(const arm_saved_state_t *iss)
518 {
519 	return iss->ash.flavor == ARM_SAVED_STATE64;
520 }
521 
522 static inline arm_saved_state32_t*
saved_state32(arm_saved_state_t * iss)523 saved_state32(arm_saved_state_t *iss)
524 {
525 	return &iss->ss_32;
526 }
527 
528 static inline const arm_saved_state32_t*
const_saved_state32(const arm_saved_state_t * iss)529 const_saved_state32(const arm_saved_state_t *iss)
530 {
531 	return &iss->ss_32;
532 }
533 
534 static inline arm_saved_state64_t*
saved_state64(arm_saved_state_t * iss)535 saved_state64(arm_saved_state_t *iss)
536 {
537 	return &iss->ss_64;
538 }
539 
540 static inline const arm_saved_state64_t*
const_saved_state64(const arm_saved_state_t * iss)541 const_saved_state64(const arm_saved_state_t *iss)
542 {
543 	return &iss->ss_64;
544 }
545 
546 static inline register_t
get_saved_state_pc(const arm_saved_state_t * iss)547 get_saved_state_pc(const arm_saved_state_t *iss)
548 {
549 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc);
550 }
551 
#if HAS_ADD_SAVED_STATE_PC
/*
 * Advance the saved program counter by 'diff' (signed byte offset).
 * 32-bit states assert that the result still fits in 32 bits; on PAC
 * hardware the 64-bit PC is updated under signature verify/re-sign.
 */
static inline void
add_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		/* x1 holds the authed PC; sign-extend diff, add, store back. */
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_ADD_SAVED_STATE_PC */
573 
/*
 * Like add_saved_state_pc(), but for states known to come from user mode:
 * on PAC hardware the saved CPSR is additionally verified to encode EL0
 * (panicking otherwise) before the PC is rewritten.
 */
static inline void
add_user_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
593 
#if HAS_SET_SAVED_STATE_PC
/*
 * Overwrite the saved program counter.  32-bit states assert 'pc' fits
 * in 32 bits; on PAC hardware the store happens under verify/re-sign.
 */
static inline void
set_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_PC */
613 
/*
 * Like set_saved_state_pc(), but for user-mode states: on PAC hardware the
 * saved CPSR must encode EL0 (panic otherwise) before the PC is rewritten.
 */
static inline void
set_user_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
631 
632 static inline register_t
get_saved_state_sp(const arm_saved_state_t * iss)633 get_saved_state_sp(const arm_saved_state_t *iss)
634 {
635 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp);
636 }
637 
638 static inline void
set_saved_state_sp(arm_saved_state_t * iss,register_t sp)639 set_saved_state_sp(arm_saved_state_t *iss, register_t sp)
640 {
641 	if (is_saved_state32(iss)) {
642 		saved_state32(iss)->sp = CAST_ASSERT_SAFE(uint32_t, sp);
643 	} else {
644 		saved_state64(iss)->sp = (uint64_t)sp;
645 	}
646 }
647 
648 static inline register_t
get_saved_state_lr(const arm_saved_state_t * iss)649 get_saved_state_lr(const arm_saved_state_t *iss)
650 {
651 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr);
652 }
653 
#if HAS_SET_SAVED_STATE_LR
/*
 * Overwrite the saved link register.  32-bit states assert 'lr' fits in
 * 32 bits; on PAC hardware the store happens under verify/re-sign (the
 * authed LR travels in x3).
 */
static inline void
set_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_LR */
673 
/*
 * Like set_saved_state_lr(), but for user-mode states: on PAC hardware the
 * saved CPSR must encode EL0 (panic otherwise) before the LR is rewritten.
 */
static inline void
set_user_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
691 
692 static inline register_t
get_saved_state_fp(const arm_saved_state_t * iss)693 get_saved_state_fp(const arm_saved_state_t *iss)
694 {
695 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp);
696 }
697 
698 static inline void
set_saved_state_fp(arm_saved_state_t * iss,register_t fp)699 set_saved_state_fp(arm_saved_state_t *iss, register_t fp)
700 {
701 	if (is_saved_state32(iss)) {
702 		saved_state32(iss)->r[7] = CAST_ASSERT_SAFE(uint32_t, fp);
703 	} else {
704 		saved_state64(iss)->fp = (uint64_t)fp;
705 	}
706 }
707 
708 static inline int
check_saved_state_reglimit(const arm_saved_state_t * iss,unsigned reg)709 check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg)
710 {
711 	return is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT);
712 }
713 
714 static inline register_t
get_saved_state_reg(const arm_saved_state_t * iss,unsigned reg)715 get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg)
716 {
717 	if (!check_saved_state_reglimit(iss, reg)) {
718 		return 0;
719 	}
720 
721 	return (register_t)(is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]));
722 }
723 
#if HAS_SET_SAVED_STATE_REG
/*
 * Store 'value' into general-purpose register 'reg' of the saved state.
 * Out-of-range indices are silently ignored.  On PAC hardware x16/x17
 * are covered by the state signature (jophash), so they must be written
 * under signature verify/re-sign.
 */
static inline void
set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov	x4, %[value]		\n"
			    "str	x4, [x0, %[SS64_X16]]	\n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov	x5, %[value]		\n"
			    "str	x5, [x0, %[SS64_X17]]	\n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
#endif /* HAS_SET_SAVED_STATE_REG */
758 
/*
 * Like set_saved_state_reg(), but for user-mode states: on PAC hardware
 * the saved CPSR must encode EL0 (panic otherwise) before x16/x17 — which
 * are covered by the signature — are rewritten.
 */
static inline void
set_user_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x4, %[value]		\n"
			    "str	x4, [x0, %[SS64_X16]]	\n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x5, %[value]		\n"
			    "str	x5, [x0, %[SS64_X17]]	\n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
791 
792 
793 static inline uint32_t
get_saved_state_cpsr(const arm_saved_state_t * iss)794 get_saved_state_cpsr(const arm_saved_state_t *iss)
795 {
796 	return is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr;
797 }
798 
#if HAS_MASK_SAVED_STATE_CPSR
/*
 * OR 'set_bits' into and clear 'clear_bits' from the saved CPSR.  No
 * user/kernel validation is performed; see mask_user_saved_state_cpsr().
 * On PAC hardware the update happens under signature verify/re-sign
 * (the authed CPSR travels in w2).
 */
static inline void
mask_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr |= set_bits;
		saved_state32(iss)->cpsr &= ~clear_bits;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]	\n"
		    "orr	w2, w2, w6, lsl #0	\n"
		    "mov	w6, %w[clear_bits]	\n"
		    "bic	w2, w2, w6, lsl #0	\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		saved_state64(iss)->cpsr |= set_bits;
		saved_state64(iss)->cpsr &= ~clear_bits;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_MASK_SAVED_STATE_CPSR */
824 
/*
 * Apply 'set_bits'/'clear_bits' to the saved CPSR of a user-mode state,
 * panicking (ml_panic_on_invalid_new_cpsr) if the resulting CPSR would
 * not be a valid user-mode CPSR.  The PAC path performs the same EL0
 * check in assembly between the bit manipulation and the store.
 */
static inline void
mask_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		uint32_t new_cpsr = saved_state32(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state32(iss)->cpsr = new_cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]		\n"
		    "orr	w2, w2, w6, lsl #0		\n"
		    "mov	w6, %w[clear_bits]		\n"
		    "bic	w2, w2, w6, lsl #0		\n"
		    "and	w6, w2, %[CPSR_EL_MASK]		\n"
		    "cmp	w6, %[CPSR_EL0]			\n"
		    "b.eq	1f				\n"
		    "mov	w1, w2				\n"
		    "bl		_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0				\n"
		    "1:						\n"
		    "str	w2, [x0, %[SS64_CPSR]]		\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		uint32_t new_cpsr = saved_state64(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR64_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state64(iss)->cpsr = new_cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
865 
#if HAS_SET_SAVED_STATE_CPSR
/*
 * Overwrite the saved CPSR/PSTATE of a thread's saved state with an
 * arbitrary value.  No user/kernel validation is performed; see
 * set_user_saved_state_cpsr() for the checked variant.
 */
static inline void
set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		/*
		 * PAC-protected 64-bit state: store the new value at
		 * SS64_CPSR(x0) through the signing macro so the state's
		 * signature stays valid.
		 */
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]		\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_CPSR */
885 
/*
 * Overwrite the saved CPSR/PSTATE of a *user* thread's saved state.
 *
 * The new value is validated to describe a user-mode PSTATE
 * (PSR_IS_USER / PSR64_IS_USER, or the EL-field compare in the asm
 * path); if not, ml_panic_on_invalid_new_cpsr() is called with the
 * offending value.
 */
static inline void
set_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		if (!PSR_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		/*
		 * PAC-protected state must be updated via the signing macro.
		 * The fragment checks that the EL field of the new value
		 * equals EL0, panics (then brk, which should be unreachable)
		 * if not, and stores the result at SS64_CPSR(x0).
		 */
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]			\n"
		    "and	w6, w2, %[CPSR_EL_MASK]		\n"
		    "cmp	w6, %[CPSR_EL0]			\n"
		    "b.eq	1f				\n"
		    "mov	w1, w2				\n"
		    "bl		_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0				\n"
		    "1:						\n"
		    "str	w2, [x0, %[SS64_CPSR]]		\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		if (!PSR64_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
916 
917 static inline register_t
get_saved_state_far(const arm_saved_state_t * iss)918 get_saved_state_far(const arm_saved_state_t *iss)
919 {
920 	return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far);
921 }
922 
923 static inline void
set_saved_state_far(arm_saved_state_t * iss,register_t far)924 set_saved_state_far(arm_saved_state_t *iss, register_t far)
925 {
926 	if (is_saved_state32(iss)) {
927 		saved_state32(iss)->far = CAST_ASSERT_SAFE(uint32_t, far);
928 	} else {
929 		saved_state64(iss)->far = (uint64_t)far;
930 	}
931 }
932 
933 static inline uint32_t
get_saved_state_esr(const arm_saved_state_t * iss)934 get_saved_state_esr(const arm_saved_state_t *iss)
935 {
936 	return is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr;
937 }
938 
939 static inline void
set_saved_state_esr(arm_saved_state_t * iss,uint32_t esr)940 set_saved_state_esr(arm_saved_state_t *iss, uint32_t esr)
941 {
942 	if (is_saved_state32(iss)) {
943 		saved_state32(iss)->esr = esr;
944 	} else {
945 		saved_state64(iss)->esr = esr;
946 	}
947 }
948 
949 static inline uint32_t
get_saved_state_exc(const arm_saved_state_t * iss)950 get_saved_state_exc(const arm_saved_state_t *iss)
951 {
952 	return is_saved_state32(iss) ? const_saved_state32(iss)->exception : const_saved_state64(iss)->exception;
953 }
954 
955 static inline void
set_saved_state_exc(arm_saved_state_t * iss,uint32_t exc)956 set_saved_state_exc(arm_saved_state_t *iss, uint32_t exc)
957 {
958 	if (is_saved_state32(iss)) {
959 		saved_state32(iss)->exception = exc;
960 	} else {
961 		saved_state64(iss)->exception = exc;
962 	}
963 }
964 
965 extern void panic_unimplemented(void);
966 
967 /**
968  * Extracts the SVC (Supervisor Call) number from the appropriate GPR (General
969  * Purpose Register).
970  *
971  * @param[in] iss the 32-bit or 64-bit ARM saved state (i.e. trap frame).
972  *
973  * @return The SVC number.
974  */
975 static inline int
get_saved_state_svc_number(const arm_saved_state_t * iss)976 get_saved_state_svc_number(const arm_saved_state_t *iss)
977 {
978 	return is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]; /* Only first word counts here */
979 }
980 
981 typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t;
982 
/*
 * Aggregate debug-register state: a flavor header selecting between the
 * 32- and 64-bit layouts, plus a reference count.
 */
struct arm_debug_aggregate_state {
	arm_state_hdr_t dsh;            /* flavor header: selects ds32 vs ds64 */
	union {
		arm_debug_state32_t ds32;
		arm_debug_state64_t ds64;
	} uds;
	os_refcnt_t     ref;            /* reference count on this state */
} __attribute__((aligned(16)));

typedef struct arm_debug_aggregate_state arm_debug_state_t;
993 
994 #define ARM_LEGACY_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
995 	(sizeof (arm_legacy_debug_state_t)/sizeof(uint32_t)))
996 
997 /*
998  * NEON context
999  */
1000 typedef __uint128_t uint128_t;
1001 typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2)));
1002 typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4)));
1003 
/*
 * 32-bit (AArch32) NEON/VFP register context.  The union members are
 * overlapping views of the same register file: 16 x 128-bit Q,
 * 32 x 64-bit D, or 32 x 32-bit S registers.
 */
struct arm_neon_saved_state32 {
	union {
		uint128_t q[16];
		uint64_t  d[32];
		uint32_t  s[32];
	} v;
	uint32_t fpsr;                  /* floating-point status register */
	uint32_t fpcr;                  /* floating-point control register */
};
typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t;
1014 
1015 #define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
1016 	(sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int)))
1017 
/*
 * 64-bit (AArch64) NEON register context: 32 x 128-bit V registers,
 * viewable as scalar 128-bit quantities or as 2x64 / 4x32 vectors,
 * plus FPSR/FPCR.
 */
struct arm_neon_saved_state64 {
	union {
		uint128_t  q[32];
		uint64x2_t d[32];
		uint32x4_t s[32];
	} v;
	uint32_t fpsr;                  /* floating-point status register */
	uint32_t fpcr;                  /* floating-point control register */
};
typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t;
1028 
1029 #define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
1030 	(sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int)))
1031 
/*
 * Flavor-tagged NEON context: nsh.flavor (ARM_NEON_SAVED_STATE32/64)
 * selects which union member is live.  The ns_32/ns_64 macros below let
 * callers write state->ns_32 instead of state->uns.ns_32.
 */
struct arm_neon_saved_state {
	arm_state_hdr_t nsh;            /* flavor header */
	union {
		struct arm_neon_saved_state32 ns_32;
		struct arm_neon_saved_state64 ns_64;
	} uns;
};
typedef struct arm_neon_saved_state arm_neon_saved_state_t;
#define ns_32 uns.ns_32
#define ns_64 uns.ns_64
1042 
/*
 * Reduced NEON context for kernel threads: only eight D registers and
 * FPCR.  NOTE(review): presumably the callee-saved d8-d15 set — confirm
 * against the kernel context save/restore code.
 */
struct arm_kernel_neon_saved_state {
	uint64_t d[8];
	uint32_t fpcr;
};
typedef struct arm_kernel_neon_saved_state arm_kernel_neon_saved_state_t;
1048 
1049 static inline boolean_t
is_neon_saved_state32(const arm_neon_saved_state_t * state)1050 is_neon_saved_state32(const arm_neon_saved_state_t *state)
1051 {
1052 	return state->nsh.flavor == ARM_NEON_SAVED_STATE32;
1053 }
1054 
1055 static inline boolean_t
is_neon_saved_state64(const arm_neon_saved_state_t * state)1056 is_neon_saved_state64(const arm_neon_saved_state_t *state)
1057 {
1058 	return state->nsh.flavor == ARM_NEON_SAVED_STATE64;
1059 }
1060 
1061 static inline arm_neon_saved_state32_t *
neon_state32(arm_neon_saved_state_t * state)1062 neon_state32(arm_neon_saved_state_t *state)
1063 {
1064 	return &state->ns_32;
1065 }
1066 
1067 static inline arm_neon_saved_state64_t *
neon_state64(arm_neon_saved_state_t * state)1068 neon_state64(arm_neon_saved_state_t *state)
1069 {
1070 	return &state->ns_64;
1071 }
1072 
1073 
1074 
1075 /*
1076  * Aggregated context
1077  */
1078 
/*
 * Full aggregated thread context: general-purpose saved state plus NEON
 * state.
 */
struct arm_context {
	struct arm_saved_state ss;      /* GPR/PC/CPSR saved state */
	struct arm_neon_saved_state ns; /* NEON/FP saved state */
};
typedef struct arm_context arm_context_t;
1084 
/*
 * Reduced aggregated context for kernel threads, pairing the kernel
 * saved state with the reduced kernel NEON state.
 */
struct arm_kernel_context {
	struct arm_kernel_saved_state ss;       /* reduced GPR saved state */
	struct arm_kernel_neon_saved_state ns;  /* reduced NEON saved state */
};
typedef struct arm_kernel_context arm_kernel_context_t;
1090 
1091 extern void saved_state_to_thread_state64(const arm_saved_state_t*, arm_thread_state64_t*);
1092 extern void thread_state64_to_saved_state(const arm_thread_state64_t*, arm_saved_state_t*);
1093 
1094 #else /* defined(__arm64__) */
1095 #error Unknown arch
1096 #endif /* defined(__arm64__) */
1097 
1098 extern void saved_state_to_thread_state32(const arm_saved_state_t*, arm_thread_state32_t*);
1099 extern void thread_state32_to_saved_state(const arm_thread_state32_t*, arm_saved_state_t*);
1100 
1101 #endif /* XNU_KERNEL_PRIVATE */
1102 
1103 #endif /* defined (__arm__) || defined (__arm64__) */
1104 
1105 #endif /* _ARM_THREAD_STATUS_H_ */
1106