1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * FILE_ID: thread_status.h
30 */
31
32
33 #ifndef _ARM_THREAD_STATUS_H_
34 #define _ARM_THREAD_STATUS_H_
35
36 #if defined (__arm__) || defined (__arm64__)
37
38 #include <mach/machine/_structs.h>
39 #include <mach/machine/thread_state.h>
40 #include <mach/message.h>
41 #include <mach/vm_types.h>
42
43 #ifdef XNU_KERNEL_PRIVATE
44 #include <os/refcnt.h>
45 #endif
46
47 /*
48 * Support for determining the state of a thread
49 */
50
51
52 /*
53 * Flavors
54 */
55
/*
 * Thread state flavor numbers.  Every flavor must have a unique value, and
 * THREAD_STATE_FLAVORS (below) must stay 1 greater than the highest one.
 */
#define ARM_THREAD_STATE 1
#define ARM_UNIFIED_THREAD_STATE ARM_THREAD_STATE
#define ARM_VFP_STATE 2
#define ARM_EXCEPTION_STATE 3
#define ARM_DEBUG_STATE 4 /* pre-armv8 */
#define THREAD_STATE_NONE 5
#define ARM_THREAD_STATE64 6
#define ARM_EXCEPTION_STATE64 7
// ARM_THREAD_STATE_LAST 8 /* legacy */
#define ARM_THREAD_STATE32 9
#define ARM_EXCEPTION_STATE64_V2 10

#ifdef XNU_KERNEL_PRIVATE
#define X86_THREAD_STATE_NONE 13 /* i386/thread_status.h THREAD_STATE_NONE */
#endif /* XNU_KERNEL_PRIVATE */

/* API */
#define ARM_DEBUG_STATE32 14
#define ARM_DEBUG_STATE64 15
#define ARM_NEON_STATE 16
#define ARM_NEON_STATE64 17
#define ARM_CPMU_STATE64 18

#ifdef XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SAVED_STATE32 20
#define ARM_SAVED_STATE64 21
#define ARM_NEON_SAVED_STATE32 22
#define ARM_NEON_SAVED_STATE64 23
#endif /* XNU_KERNEL_PRIVATE */

#define ARM_PAGEIN_STATE 27

/* API */
#define ARM_SME_STATE 28
#define ARM_SVE_Z_STATE1 29
#define ARM_SVE_Z_STATE2 30
#define ARM_SVE_P_STATE 31
#define ARM_SME_ZA_STATE1 32
#define ARM_SME_ZA_STATE2 33
#define ARM_SME_ZA_STATE3 34
#define ARM_SME_ZA_STATE4 35
#define ARM_SME_ZA_STATE5 36
#define ARM_SME_ZA_STATE6 37
#define ARM_SME_ZA_STATE7 38
#define ARM_SME_ZA_STATE8 39
#define ARM_SME_ZA_STATE9 40
#define ARM_SME_ZA_STATE10 41
#define ARM_SME_ZA_STATE11 42
#define ARM_SME_ZA_STATE12 43 /* was 42, which collided with ARM_SME_ZA_STATE11 */
#define ARM_SME_ZA_STATE13 44
#define ARM_SME_ZA_STATE14 45
#define ARM_SME_ZA_STATE15 46
#define ARM_SME_ZA_STATE16 47
#define ARM_SME2_STATE 48
#if XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SME_SAVED_STATE 49
#endif /* XNU_KERNEL_PRIVATE */

#define THREAD_STATE_FLAVORS 50 /* This must be updated to 1 more than the highest numerical state flavor */
117
#ifndef ARM_STATE_FLAVOR_IS_OTHER_VALID
#define ARM_STATE_FLAVOR_IS_OTHER_VALID(_flavor_) 0
#endif

/*
 * TRUE if setting flavor x rewrites the core GPR file.  The parameter is
 * parenthesized at every use so that compound expression arguments
 * (e.g. casts, ternaries) expand safely.
 */
#define FLAVOR_MODIFIES_CORE_CPU_REGISTERS(x) \
	(((x) == ARM_THREAD_STATE) ||         \
	((x) == ARM_THREAD_STATE32) ||        \
	((x) == ARM_THREAD_STATE64))

/* TRUE if x is a flavor accepted by the thread get/set-state interfaces. */
#define VALID_THREAD_STATE_FLAVOR(x)          \
	(((x) == ARM_THREAD_STATE) ||         \
	((x) == ARM_VFP_STATE) ||             \
	((x) == ARM_EXCEPTION_STATE) ||       \
	((x) == ARM_DEBUG_STATE) ||           \
	((x) == THREAD_STATE_NONE) ||         \
	((x) == ARM_THREAD_STATE32) ||        \
	((x) == ARM_THREAD_STATE64) ||        \
	((x) == ARM_EXCEPTION_STATE64) ||     \
	((x) == ARM_EXCEPTION_STATE64_V2) ||  \
	((x) == ARM_NEON_STATE) ||            \
	((x) == ARM_NEON_STATE64) ||          \
	((x) == ARM_DEBUG_STATE32) ||         \
	((x) == ARM_DEBUG_STATE64) ||         \
	((x) == ARM_PAGEIN_STATE) ||          \
	(ARM_STATE_FLAVOR_IS_OTHER_VALID((x))))
/*
 * VALID_THREAD_STATE_FLAVOR() intentionally excludes ARM_SME_STATE through
 * ARM_SME2_STATE, since these are not currently supported inside Mach exception
 * ports.
 */
148
/*
 * Header shared by all discriminated-union state structures below:
 * identifies which union member is valid and its size.
 */
struct arm_state_hdr {
	uint32_t flavor;        /* state flavor, e.g. ARM_THREAD_STATE32/64 */
	uint32_t count;         /* size of the payload in 32-bit words */
};
typedef struct arm_state_hdr arm_state_hdr_t;

/* Thread-state typedefs; the underlying layouts come from <mach/machine/_structs.h> */
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state_t;
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state32_t;
typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t;
158
#if !defined(KERNEL)
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__)

/*
 * Accessor macros for arm_thread_state64_t pointer fields.  Userland must
 * use these rather than touching the fields directly; they wrap the
 * __darwin_* implementations, which handle pointer authentication where
 * the platform uses it.
 */

/* Return pc field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_pc(ts) \
	__darwin_arm_thread_state64_get_pc(ts)
/* Return pc field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_pc() macro in that case. */
#define arm_thread_state64_get_pc_fptr(ts) \
	__darwin_arm_thread_state64_get_pc_fptr(ts)
/* Set pc field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_pc_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_pc_fptr(ts, fptr)
/* Set pc field of arm_thread_state64_t to an already signed function pointer */
#define arm_thread_state64_set_pc_presigned_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_pc_presigned_fptr(ts, fptr)
/* Return lr field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_lr(ts) \
	__darwin_arm_thread_state64_get_lr(ts)
/* Return lr field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_lr() macro in that case. */
#define arm_thread_state64_get_lr_fptr(ts) \
	__darwin_arm_thread_state64_get_lr_fptr(ts)
/* Set lr field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_lr_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_lr_fptr(ts, fptr)
/* Set lr field of arm_thread_state64_t to an already signed function pointer */
#define arm_thread_state64_set_lr_presigned_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_lr_presigned_fptr(ts, fptr)
/* Return sp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_sp(ts) \
	__darwin_arm_thread_state64_get_sp(ts)
/* Set sp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_sp(ts, ptr) \
	__darwin_arm_thread_state64_set_sp(ts, ptr)
/* Return fp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_fp(ts) \
	__darwin_arm_thread_state64_get_fp(ts)
/* Set fp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_fp(ts, ptr) \
	__darwin_arm_thread_state64_set_fp(ts, ptr)
/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */
#define arm_thread_state64_ptrauth_strip(ts) \
	__darwin_arm_thread_state64_ptrauth_strip(ts)

#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */
#endif /* !defined(KERNEL) */
210
/*
 * Unified thread state: an arm_state_hdr_t discriminant followed by either
 * a 32-bit or a 64-bit thread state.  ash.flavor selects the valid member.
 */
struct arm_unified_thread_state {
	arm_state_hdr_t ash;
	union {
		arm_thread_state32_t ts_32;
		arm_thread_state64_t ts_64;
	} uts;
};
/* Shorthand so callers can write state->ts_32 / state->ts_64 directly */
#define ts_32 uts.ts_32
#define ts_64 uts.ts_64
typedef struct arm_unified_thread_state arm_unified_thread_state_t;

/* State sizes in 32-bit words, as expected by the Mach thread-state APIs */
#define ARM_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state32_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state64_t)/sizeof(uint32_t)))
#define ARM_UNIFIED_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_unified_thread_state_t)/sizeof(uint32_t)))
230
231
/* Floating-point / SIMD state typedefs (layouts in <mach/machine/_structs.h>) */
typedef _STRUCT_ARM_VFP_STATE arm_vfp_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state32_t;
typedef _STRUCT_ARM_NEON_STATE64 arm_neon_state64_t;


/* Exception state typedefs */
typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state_t;
typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state32_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64_V2 arm_exception_state64_v2_t;

/* Debug (breakpoint/watchpoint) state typedefs */
typedef _STRUCT_ARM_DEBUG_STATE32 arm_debug_state32_t;
typedef _STRUCT_ARM_DEBUG_STATE64 arm_debug_state64_t;

typedef _STRUCT_ARM_PAGEIN_STATE arm_pagein_state_t;

/* SME / SVE (matrix and scalable-vector extension) state typedefs */
typedef _STRUCT_ARM_SME_STATE arm_sme_state_t;
typedef _STRUCT_ARM_SVE_Z_STATE arm_sve_z_state_t;
typedef _STRUCT_ARM_SVE_P_STATE arm_sve_p_state_t;
typedef _STRUCT_ARM_SME_ZA_STATE arm_sme_za_state_t;
typedef _STRUCT_ARM_SME2_STATE arm_sme2_state_t;

#if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
/* See below for ARM64 kernel structure definition for arm_debug_state. */
#else /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
/*
 * Otherwise not ARM64 kernel and we must preserve legacy ARM definitions of
 * arm_debug_state for binary compatibility of userland consumers of this file.
 */
#if defined(__arm__)
typedef _STRUCT_ARM_DEBUG_STATE arm_debug_state_t;
#elif defined(__arm64__)
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t;
#else /* defined(__arm__) */
#error Undefined architecture
#endif /* defined(__arm__) */
#endif /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
269
/*
 * Per-flavor state sizes, in 32-bit words, for the Mach thread-state APIs.
 */
#define ARM_VFP_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfp_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_V2_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_v2_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state32_t)/sizeof(uint32_t)))

#define ARM_PAGEIN_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_pagein_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state64_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state64_t)/sizeof(uint32_t)))

#define ARM_SME_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sme_state_t)/sizeof(uint32_t)))

#define ARM_SVE_Z_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sve_z_state_t)/sizeof(uint32_t)))

#define ARM_SVE_P_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sve_p_state_t)/sizeof(uint32_t)))

#define ARM_SME_ZA_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sme_za_state_t)/sizeof(uint32_t)))

#define ARM_SME2_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_sme2_state_t)/sizeof(uint32_t)))

/* The machine-independent "default" flavor for this architecture */
#define MACHINE_THREAD_STATE ARM_THREAD_STATE
#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT


/*
 * Largest state on this machine:
 */
#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX
323
324 #ifdef XNU_KERNEL_PRIVATE
325
/*
 * Feature gates for the privileged saved-state mutators defined below.
 * Each HAS_* macro enables one mutator; only configurations that genuinely
 * need to rewrite saved thread state get them compiled in.
 */
#if CONFIG_DTRACE
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_LR 1
#define HAS_SET_SAVED_STATE_REG 1
#define HAS_MASK_SAVED_STATE_CPSR 1
#endif /* CONFIG_DTRACE */

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
#define HAS_SET_SAVED_STATE_CPSR 1
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

#if CONFIG_XNUPOST
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_CPSR 1
#endif /* CONFIG_XNUPOST */

#if DEBUG || DEVELOPMENT
#define HAS_ADD_SAVED_STATE_PC 1
#endif
347
348
/* TRUE if the unified state currently holds a 32-bit thread state */
static inline boolean_t
is_thread_state32(const arm_unified_thread_state_t *its)
{
	return its->ash.flavor == ARM_THREAD_STATE32;
}

/* TRUE if the unified state currently holds a 64-bit thread state */
static inline boolean_t
is_thread_state64(const arm_unified_thread_state_t *its)
{
	return its->ash.flavor == ARM_THREAD_STATE64;
}

/* Mutable access to the 32-bit member; caller must have checked the flavor */
static inline arm_thread_state32_t*
thread_state32(arm_unified_thread_state_t *its)
{
	return &its->ts_32;
}

/* Mutable access to the 64-bit member; caller must have checked the flavor */
static inline arm_thread_state64_t*
thread_state64(arm_unified_thread_state_t *its)
{
	return &its->ts_64;
}

/* Read-only access to the 32-bit member; caller must have checked the flavor */
static inline const arm_thread_state32_t*
const_thread_state32(const arm_unified_thread_state_t *its)
{
	return &its->ts_32;
}

/* Read-only access to the 64-bit member; caller must have checked the flavor */
static inline const arm_thread_state64_t*
const_thread_state64(const arm_unified_thread_state_t *its)
{
	return &its->ts_64;
}
384
385 #if defined(__arm64__)
386
387 #include <kern/assert.h>
388 #include <arm64/proc_reg.h>
/* Cast val to type, asserting (on assert-enabled builds) that no bits are lost */
#define CAST_ASSERT_SAFE(type, val) (assert((val) == ((type)(val))), (type)(val))

/*
 * GPR context
 */

/* Kernel-internal saved register state for a 32-bit (AArch32) context. */
struct arm_saved_state32 {
	uint32_t r[13];     /* General purpose register r0-r12 */
	uint32_t sp;        /* Stack pointer r13 */
	uint32_t lr;        /* Link register r14 */
	uint32_t pc;        /* Program counter r15 */
	uint32_t cpsr;      /* Current program status register */
	uint32_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
};
typedef struct arm_saved_state32 arm_saved_state32_t;

/* arm_saved_state32 preceded by a discriminating tag word */
struct arm_saved_state32_tagged {
	uint32_t tag;
	struct arm_saved_state32 state;
};
typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t;

#define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state32_t)/sizeof(unsigned int)))
415
/* Kernel-internal saved register state for a 64-bit (AArch64) context. */
struct arm_saved_state64 {
	uint64_t x[29];     /* General purpose registers x0-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	uint64_t pc;        /* Program counter */
	uint32_t cpsr;      /* Current program status register */
	uint32_t reserved;  /* Reserved padding */
	uint64_t far;       /* Virtual fault address */
	uint64_t esr;       /* Exception syndrome register */
#if HAS_APPLE_PAC
	/* Signature over selected fields; maintained via ml_sign_thread_state() */
	uint64_t jophash;
#endif /* HAS_APPLE_PAC */
};
typedef struct arm_saved_state64 arm_saved_state64_t;

#define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state64_t)/sizeof(unsigned int)))

/* arm_saved_state64 preceded by a discriminating tag word */
struct arm_saved_state64_tagged {
	uint32_t tag;
	struct arm_saved_state64 state;
};
typedef struct arm_saved_state64_tagged arm_saved_state64_tagged_t;

/*
 * Discriminated union of the 32- and 64-bit saved states; ash.flavor is
 * ARM_SAVED_STATE32 or ARM_SAVED_STATE64.
 */
struct arm_saved_state {
	arm_state_hdr_t ash;
	union {
		struct arm_saved_state32 ss_32;
		struct arm_saved_state64 ss_64;
	} uss;
} __attribute__((aligned(16)));
/* Shorthand so callers can write state->ss_32 / state->ss_64 directly */
#define ss_32 uss.ss_32
#define ss_64 uss.ss_64

typedef struct arm_saved_state arm_saved_state_t;
452
/*
 * Reduced register state saved for kernel contexts: only x19-x28, fp, lr,
 * sp (the AAPCS64 callee-saved set) plus a few extra bits noted below.
 */
struct arm_kernel_saved_state {
	uint64_t x[10];     /* General purpose registers x19-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	/* Some things here we DO need to preserve */
	uint8_t pc_was_in_userspace;
	uint8_t ssbs;       /* presumably PSTATE.SSBS — inferred from name */
	uint8_t dit;        /* presumably PSTATE.DIT — inferred from name */
	uint8_t uao;        /* presumably PSTATE.UAO — inferred from name */
#if HAS_MTE
	uint8_t tco;        /* presumably PSTATE.TCO — inferred from name */
#endif
} __attribute__((aligned(16)));

typedef struct arm_kernel_saved_state arm_kernel_saved_state_t;

/* Panic helpers invoked when saved-state CPSR validation fails; never return. */
extern void ml_panic_on_invalid_old_cpsr(const arm_saved_state_t *) __attribute__((noreturn));

extern void ml_panic_on_invalid_new_cpsr(const arm_saved_state_t *, uint32_t) __attribute__((noreturn));
473
474 #if HAS_APPLE_PAC
475
476 #include <sys/cdefs.h>
477
478 /*
479 * Used by MANIPULATE_SIGNED_THREAD_STATE(), potentially from C++ (IOKit) code.
480 * Open-coded to prevent a circular dependency between mach/arm/thread_status.h
481 * and osfmk/arm/machine_routines.h.
482 */
483 __BEGIN_DECLS
484 extern uint64_t ml_pac_safe_interrupts_disable(void);
485 extern void ml_pac_safe_interrupts_restore(uint64_t);
486 __END_DECLS
487
488 /*
489 * Methods used to sign and check thread state to detect corruptions of saved
490 * thread state across exceptions and context switches.
491 */
492 extern void ml_sign_thread_state(arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);
493
494 extern void ml_check_signed_state(const arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);
495
496 /* XXX: including stddef.f here breaks ctfmerge on some builds, so use __builtin_offsetof() instead of offsetof() */
497 #define ss64_offsetof(x) __builtin_offsetof(struct arm_saved_state, ss_64.x)
498
499 /**
500 * Verify the signed thread state in _iss, execute the assembly instructions
501 * _instr, and re-sign the modified thread state. Varargs specify additional
502 * inputs.
503 *
504 * _instr may read or modify the thread state in the following registers:
505 *
506 * x0: _iss
507 * x1: authed _iss->ss_64.pc
508 * w2: authed _iss->ss_64.cpsr
509 * x3: authed _iss->ss_64.lr
510 * x4: authed _iss->ss_64.x16
511 * x5: authed _iss->ss_64.x17
512 * x6: scratch register
513 * x7: scratch register
514 * x8: scratch register
515 *
516 * If _instr makes no changes to the thread state, it may skip re-signing by
517 * branching to label 0.
518 */
519 #define MANIPULATE_SIGNED_THREAD_STATE(_iss, _instr, ...) \
520 do { \
521 uint64_t _intr = ml_pac_safe_interrupts_disable(); \
522 asm volatile ( \
523 "mov x9, lr" "\n" \
524 "mov x0, %[iss]" "\n" \
525 "msr SPSel, #1" "\n" \
526 "ldp x4, x5, [x0, %[SS64_X16]]" "\n" \
527 "ldr x7, [x0, %[SS64_PC]]" "\n" \
528 "ldr w8, [x0, %[SS64_CPSR]]" "\n" \
529 "ldr x3, [x0, %[SS64_LR]]" "\n" \
530 "mov x1, x7" "\n" \
531 "mov w2, w8" "\n" \
532 "bl _ml_check_signed_state" "\n" \
533 "mov x1, x7" "\n" \
534 "mov w2, w8" "\n" \
535 _instr "\n" \
536 "bl _ml_sign_thread_state" "\n" \
537 "0:" "\n" \
538 "msr SPSel, #0" "\n" \
539 "mov lr, x9" "\n" \
540 : \
541 : [iss] "r"(_iss), \
542 [SS64_X16] "i"(ss64_offsetof(x[16])), \
543 [SS64_PC] "i"(ss64_offsetof(pc)), \
544 [SS64_CPSR] "i"(ss64_offsetof(cpsr)), \
545 [SS64_LR] "i"(ss64_offsetof(lr)),##__VA_ARGS__ \
546 : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", \
547 "x9", "x16", "x17" \
548 ); \
549 ml_pac_safe_interrupts_restore(_intr); \
550 } while (0)
551
/*
 * Asm prologue for the *_USER_* variants: panics via
 * ml_panic_on_invalid_old_cpsr if the saved CPSR's EL field is not EL0,
 * i.e. if the state being manipulated is not genuinely a user state.
 */
#define VERIFY_USER_THREAD_STATE_INSTR \
	"and w6, w2, %[CPSR_EL_MASK]" "\n" \
	"cmp w6, %[CPSR_EL0]" "\n" \
	"b.eq 1f" "\n" \
	"bl _ml_panic_on_invalid_old_cpsr" "\n" \
	"brk #0" "\n" \
	"1:" "\n"

/* Extra asm inputs consumed by VERIFY_USER_THREAD_STATE_INSTR */
#define VERIFY_USER_THREAD_STATE_INPUTS \
	[CPSR_EL_MASK] "i"(PSR64_MODE_EL_MASK), \
	[CPSR_EL0] "i"(PSR64_MODE_EL0)

/*
 * Like MANIPULATE_SIGNED_THREAD_STATE(), but first verifies that the saved
 * CPSR describes a user (EL0) context.
 */
#define MANIPULATE_SIGNED_USER_THREAD_STATE(_iss, _instr, ...) \
	MANIPULATE_SIGNED_THREAD_STATE(_iss, \
	    VERIFY_USER_THREAD_STATE_INSTR \
	    _instr, \
	    VERIFY_USER_THREAD_STATE_INPUTS, ##__VA_ARGS__)
569
/*
 * Verify the signature on src (panicking if it fails or if src is not a
 * user state), then sign the copy at dst.  The "mov x0, %[dst]" redirects
 * the re-signing call at the end of the macro from src to dst.
 */
static inline void
check_and_sign_copied_user_thread_state(arm_saved_state_t *dst, const arm_saved_state_t *src)
{
	MANIPULATE_SIGNED_USER_THREAD_STATE(src,
	    "mov x0, %[dst]",
	    [dst] "r"(dst)
	    );
}
578 #endif /* HAS_APPLE_PAC */
579
/* TRUE if the saved state holds a 32-bit (AArch32) register file */
static inline boolean_t
is_saved_state32(const arm_saved_state_t *iss)
{
	return iss->ash.flavor == ARM_SAVED_STATE32;
}

/* TRUE if the saved state holds a 64-bit (AArch64) register file */
static inline boolean_t
is_saved_state64(const arm_saved_state_t *iss)
{
	return iss->ash.flavor == ARM_SAVED_STATE64;
}

/* Mutable access to the 32-bit member; caller must have checked the flavor */
static inline arm_saved_state32_t*
saved_state32(arm_saved_state_t *iss)
{
	return &iss->ss_32;
}

/* Read-only access to the 32-bit member; caller must have checked the flavor */
static inline const arm_saved_state32_t*
const_saved_state32(const arm_saved_state_t *iss)
{
	return &iss->ss_32;
}

/* Mutable access to the 64-bit member; caller must have checked the flavor */
static inline arm_saved_state64_t*
saved_state64(arm_saved_state_t *iss)
{
	return &iss->ss_64;
}

/* Read-only access to the 64-bit member; caller must have checked the flavor */
static inline const arm_saved_state64_t*
const_saved_state64(const arm_saved_state_t *iss)
{
	return &iss->ss_64;
}
615
616 static inline register_t
get_saved_state_pc(const arm_saved_state_t * iss)617 get_saved_state_pc(const arm_saved_state_t *iss)
618 {
619 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc);
620 }
621
#if HAS_ADD_SAVED_STATE_PC
/*
 * Advance the saved PC by diff bytes (diff may be negative).  On PAC
 * hardware the 64-bit PC is covered by the thread-state signature, so the
 * update goes through MANIPULATE_SIGNED_THREAD_STATE to re-sign it.
 */
static inline void
add_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		/* widen before adding, then assert the result still fits in 32 bits */
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		/* x1 holds the authed PC; add sign-extended diff and store back */
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov w6, %w[diff] \n"
		    "add x1, x1, w6, sxtw \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_ADD_SAVED_STATE_PC */
643
/*
 * Like add_saved_state_pc(), but for states known to describe user (EL0)
 * contexts: on PAC hardware the EL field of the saved CPSR is verified and
 * the kernel panics if it is not EL0.
 */
static inline void
add_user_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		/* widen before adding, then assert the result still fits in 32 bits */
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		/* x1 holds the authed PC; add sign-extended diff and store back */
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov w6, %w[diff] \n"
		    "add x1, x1, w6, sxtw \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
663
#if HAS_SET_SAVED_STATE_PC
/*
 * Overwrite the saved PC.  On PAC hardware the 64-bit PC is covered by the
 * thread-state signature, so the store goes through
 * MANIPULATE_SIGNED_THREAD_STATE to re-sign the state.
 */
static inline void
set_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov x1, %[pc] \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_PC */
683
/*
 * Like set_saved_state_pc(), but for states known to describe user (EL0)
 * contexts: on PAC hardware the saved CPSR's EL field is verified first.
 */
static inline void
set_user_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov x1, %[pc] \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
701
702 static inline register_t
get_saved_state_sp(const arm_saved_state_t * iss)703 get_saved_state_sp(const arm_saved_state_t *iss)
704 {
705 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp);
706 }
707
708 static inline void
set_saved_state_sp(arm_saved_state_t * iss,register_t sp)709 set_saved_state_sp(arm_saved_state_t *iss, register_t sp)
710 {
711 if (is_saved_state32(iss)) {
712 saved_state32(iss)->sp = CAST_ASSERT_SAFE(uint32_t, sp);
713 } else {
714 saved_state64(iss)->sp = (uint64_t)sp;
715 }
716 }
717
718 static inline register_t
get_saved_state_lr(const arm_saved_state_t * iss)719 get_saved_state_lr(const arm_saved_state_t *iss)
720 {
721 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr);
722 }
723
#if HAS_SET_SAVED_STATE_LR
/*
 * Overwrite the saved link register.  On PAC hardware the 64-bit LR is
 * covered by the thread-state signature, so the store goes through
 * MANIPULATE_SIGNED_THREAD_STATE to re-sign the state.
 */
static inline void
set_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov x3, %[lr] \n"
		    "str x3, [x0, %[SS64_LR]] \n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_LR */
743
/*
 * Like set_saved_state_lr(), but for states known to describe user (EL0)
 * contexts: on PAC hardware the saved CPSR's EL field is verified first.
 */
static inline void
set_user_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov x3, %[lr] \n"
		    "str x3, [x0, %[SS64_LR]] \n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
761
762 static inline register_t
get_saved_state_fp(const arm_saved_state_t * iss)763 get_saved_state_fp(const arm_saved_state_t *iss)
764 {
765 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp);
766 }
767
768 static inline void
set_saved_state_fp(arm_saved_state_t * iss,register_t fp)769 set_saved_state_fp(arm_saved_state_t *iss, register_t fp)
770 {
771 if (is_saved_state32(iss)) {
772 saved_state32(iss)->r[7] = CAST_ASSERT_SAFE(uint32_t, fp);
773 } else {
774 saved_state64(iss)->fp = (uint64_t)fp;
775 }
776 }
777
778 static inline int
check_saved_state_reglimit(const arm_saved_state_t * iss,unsigned reg)779 check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg)
780 {
781 return is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT);
782 }
783
784 static inline register_t
get_saved_state_reg(const arm_saved_state_t * iss,unsigned reg)785 get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg)
786 {
787 if (!check_saved_state_reglimit(iss, reg)) {
788 return 0;
789 }
790
791 return (register_t)(is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]));
792 }
793
#if HAS_SET_SAVED_STATE_REG
/*
 * Overwrite general-purpose register reg in the saved state; out-of-range
 * indices are silently ignored.  On PAC hardware x16/x17 are covered by
 * the thread-state signature and must be rewritten through
 * MANIPULATE_SIGNED_THREAD_STATE.
 */
static inline void
set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov x4, %[value] \n"
			    "str x4, [x0, %[SS64_X16]] \n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov x5, %[value] \n"
			    "str x5, [x0, %[SS64_X17]] \n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
#endif /* HAS_SET_SAVED_STATE_REG */
828
/*
 * Like set_saved_state_reg(), but for states known to describe user (EL0)
 * contexts: on PAC hardware the saved CPSR's EL field is verified before
 * any signed field (x16/x17) is rewritten.
 */
static inline void
set_user_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov x4, %[value] \n"
			    "str x4, [x0, %[SS64_X16]] \n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov x5, %[value] \n"
			    "str x5, [x0, %[SS64_X17]] \n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
861
862
863 static inline uint32_t
get_saved_state_cpsr(const arm_saved_state_t * iss)864 get_saved_state_cpsr(const arm_saved_state_t *iss)
865 {
866 return is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr;
867 }
868
#if HAS_MASK_SAVED_STATE_CPSR
/*
 * Apply set_bits then clear clear_bits in the saved CPSR.  On PAC hardware
 * the 64-bit CPSR is covered by the thread-state signature, so the
 * read-modify-write happens inside MANIPULATE_SIGNED_THREAD_STATE
 * (w2 holds the authed CPSR on entry).
 */
static inline void
mask_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr |= set_bits;
		saved_state32(iss)->cpsr &= ~clear_bits;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov w6, %w[set_bits] \n"
		    "orr w2, w2, w6, lsl #0 \n"
		    "mov w6, %w[clear_bits] \n"
		    "bic w2, w2, w6, lsl #0 \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		saved_state64(iss)->cpsr |= set_bits;
		saved_state64(iss)->cpsr &= ~clear_bits;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_MASK_SAVED_STATE_CPSR */
894
/*
 * Like mask_saved_state_cpsr(), but for states known to describe user
 * contexts: if the resulting CPSR would no longer describe a user (EL0)
 * context, the kernel panics via ml_panic_on_invalid_new_cpsr rather than
 * storing it.
 */
static inline void
mask_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		uint32_t new_cpsr = saved_state32(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state32(iss)->cpsr = new_cpsr;
	} else {
#if HAS_APPLE_PAC
		/* compute new CPSR in w2, validate its EL field is EL0, then store */
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov w6, %w[set_bits] \n"
		    "orr w2, w2, w6, lsl #0 \n"
		    "mov w6, %w[clear_bits] \n"
		    "bic w2, w2, w6, lsl #0 \n"
		    "and w6, w2, %[CPSR_EL_MASK] \n"
		    "cmp w6, %[CPSR_EL0] \n"
		    "b.eq 1f \n"
		    "mov w1, w2 \n"
		    "bl _ml_panic_on_invalid_new_cpsr \n"
		    "brk #0 \n"
		    "1: \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		uint32_t new_cpsr = saved_state64(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR64_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state64(iss)->cpsr = new_cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
935
936 #if HAS_SET_SAVED_STATE_CPSR
static inline void
/*
 * Overwrite the CPSR of a saved state with @cpsr.
 *
 * Unlike set_user_saved_state_cpsr(), no user-mode validation of the new
 * value is performed.
 *
 * @param iss  32-bit or 64-bit saved state (trap frame).
 * @param cpsr new CPSR value.
 */
set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		/*
		 * The 64-bit saved state is signed; the store must go through
		 * the signed-state manipulation macro so the state remains
		 * validly signed with the new CPSR.
		 */
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov w2, %w[cpsr] \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [cpsr] "r"(cpsr)
		    );
#else
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
954 #endif /* HAS_SET_SAVED_STATE_CPSR */
955
/*
 * Overwrite the CPSR of a user saved state with @cpsr, panicking (via
 * ml_panic_on_invalid_new_cpsr) if @cpsr is not a user-mode PSR value.
 *
 * @param iss  32-bit or 64-bit saved state (trap frame).
 * @param cpsr new CPSR value; must satisfy PSR_IS_USER / PSR64_IS_USER.
 */
static inline void
set_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		if (!PSR_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		/*
		 * The 64-bit saved state is signed; update the CPSR inside the
		 * signed-state manipulation macro.  The asm masks out the EL
		 * field of the new value and compares it with EL0, branching
		 * over the panic call when the value is a user-mode PSTATE
		 * (the brk afterwards should never be reached).
		 */
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov w2, %w[cpsr] \n"
		    "and w6, w2, %[CPSR_EL_MASK] \n"
		    "cmp w6, %[CPSR_EL0] \n"
		    "b.eq 1f \n"
		    "mov w1, w2 \n"
		    "bl _ml_panic_on_invalid_new_cpsr \n"
		    "brk #0 \n"
		    "1: \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [cpsr] "r"(cpsr)
		    );
#else
		if (!PSR64_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
986
987 static inline register_t
get_saved_state_far(const arm_saved_state_t * iss)988 get_saved_state_far(const arm_saved_state_t *iss)
989 {
990 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far);
991 }
992
993 static inline void
set_saved_state_far(arm_saved_state_t * iss,register_t far)994 set_saved_state_far(arm_saved_state_t *iss, register_t far)
995 {
996 if (is_saved_state32(iss)) {
997 saved_state32(iss)->far = CAST_ASSERT_SAFE(uint32_t, far);
998 } else {
999 saved_state64(iss)->far = (uint64_t)far;
1000 }
1001 }
1002
1003 static inline uint64_t
get_saved_state_esr(const arm_saved_state_t * iss)1004 get_saved_state_esr(const arm_saved_state_t *iss)
1005 {
1006 return is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr;
1007 }
1008
1009 static inline void
set_saved_state_esr(arm_saved_state_t * iss,uint64_t esr)1010 set_saved_state_esr(arm_saved_state_t *iss, uint64_t esr)
1011 {
1012 if (is_saved_state32(iss)) {
1013 assert(esr < (uint64_t) (uint32_t) -1);
1014 saved_state32(iss)->esr = (uint32_t) esr;
1015 } else {
1016 saved_state64(iss)->esr = esr;
1017 }
1018 }
1019
1020 extern void panic_unimplemented(void);
1021
1022 /**
1023 * Extracts the SVC (Supervisor Call) number from the appropriate GPR (General
1024 * Purpose Register).
1025 *
1026 * @param[in] iss the 32-bit or 64-bit ARM saved state (i.e. trap frame).
1027 *
1028 * @return The SVC number.
1029 */
1030 static inline int
get_saved_state_svc_number(const arm_saved_state_t * iss)1031 get_saved_state_svc_number(const arm_saved_state_t *iss)
1032 {
1033 return is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]; /* Only first word counts here */
1034 }
1035
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t;

/*
 * Aggregate debug state: a flavor header selecting between the 32-bit and
 * 64-bit debug-register images, plus a reference count (os_refcnt_t).
 */
struct arm_debug_aggregate_state {
	arm_state_hdr_t dsh;              /* flavor header */
	union {
		arm_debug_state32_t ds32; /* AArch32 debug registers */
		arm_debug_state64_t ds64; /* AArch64 debug registers */
	} uds;
	os_refcnt_t ref;                  /* reference count */
} __attribute__((aligned(16)));

typedef struct arm_debug_aggregate_state arm_debug_state_t;

/* Size of the legacy debug state, in 32-bit words (mach count convention). */
#define ARM_LEGACY_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_legacy_debug_state_t)/sizeof(uint32_t)))
1051
/*
 * NEON context
 */
typedef __uint128_t uint128_t;
typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2)));
typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4)));

/*
 * AArch32 NEON state: the register file viewed as 16 Q, 32 D, or 32 S
 * registers (overlapping views of the same storage), plus FPSR/FPCR.
 */
struct arm_neon_saved_state32 {
	union {
		uint128_t q[16];
		uint64_t d[32];
		uint32_t s[32];
	} v;
	uint32_t fpsr;
	uint32_t fpcr;
};
typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t;

/* Size of the 32-bit NEON state, in 32-bit words (mach count convention). */
#define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int)))
1072
/*
 * AArch64 NEON state: 32 vector registers (as 128-bit scalars, or 2x64 /
 * 4x32 vector views of the same storage), plus FPSR/FPCR.
 */
struct arm_neon_saved_state64 {
	union {
		uint128_t q[32];
		uint64x2_t d[32];
		uint32x4_t s[32];
	} v;
	uint32_t fpsr;
	uint32_t fpcr;
};
typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t;

/* Size of the 64-bit NEON state, in 32-bit words (mach count convention). */
#define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int)))
1086
/*
 * Unified NEON saved state: a flavor header (nsh) selecting between the
 * 32-bit and 64-bit register images in the union.
 */
struct arm_neon_saved_state {
	arm_state_hdr_t nsh;
	union {
		struct arm_neon_saved_state32 ns_32;
		struct arm_neon_saved_state64 ns_64;
	} uns;
};
typedef struct arm_neon_saved_state arm_neon_saved_state_t;
/* Shorthand for the union members; used by the neon_state32/64 accessors. */
#define ns_32 uns.ns_32
#define ns_64 uns.ns_64

/*
 * Reduced NEON state saved for kernel threads: eight D registers plus FPCR
 * only — presumably the AAPCS64 callee-saved d8-d15; confirm against the
 * context-switch code.
 */
struct arm_kernel_neon_saved_state {
	uint64_t d[8];
	uint32_t fpcr;
};
typedef struct arm_kernel_neon_saved_state arm_kernel_neon_saved_state_t;
1103
1104 static inline boolean_t
is_neon_saved_state32(const arm_neon_saved_state_t * state)1105 is_neon_saved_state32(const arm_neon_saved_state_t *state)
1106 {
1107 return state->nsh.flavor == ARM_NEON_SAVED_STATE32;
1108 }
1109
1110 static inline boolean_t
is_neon_saved_state64(const arm_neon_saved_state_t * state)1111 is_neon_saved_state64(const arm_neon_saved_state_t *state)
1112 {
1113 return state->nsh.flavor == ARM_NEON_SAVED_STATE64;
1114 }
1115
/*
 * Accessor for the 32-bit NEON image.  NOTE: ns_32 is an object-like macro
 * expanding to uns.ns_32, so this must be spelled exactly as below.
 */
static inline arm_neon_saved_state32_t *
neon_state32(arm_neon_saved_state_t *state)
{
	return &state->ns_32;
}

/*
 * Accessor for the 64-bit NEON image.  NOTE: ns_64 is an object-like macro
 * expanding to uns.ns_64, so this must be spelled exactly as below.
 */
static inline arm_neon_saved_state64_t *
neon_state64(arm_neon_saved_state_t *state)
{
	return &state->ns_64;
}
1127
1128
#if HAS_ARM_FEAT_SME


struct arm_sme_saved_state;
typedef struct arm_sme_saved_state arm_sme_saved_state_t;

#if !__has_ptrcheck
/*
 * SME register context: fixed ZT0 storage followed by a flexible array
 * holding the variably-sized Z, P, and ZA register images, laid out in that
 * order (see the arm_sme_z / arm_sme_p / arm_sme_za accessors below).
 */
typedef struct {
	uint8_t zt0[64];    /* ZT0 register storage */
	uint8_t __z_p_za[]; /* Z, then P, then ZA data; sizes depend on svl_b */
} arm_sme_context_t;

/*
 * Saved SME state: flavor header, SVCR value, streaming vector length in
 * bytes (svl_b), and the register data itself.
 */
struct arm_sme_saved_state {
	arm_state_hdr_t hdr;
	uint64_t svcr;
	uint16_t svl_b;     /* streaming vector length, in bytes */
	arm_sme_context_t context;
};
1147
/*
 * Bytes occupied by the Z register file: 32 registers of svl_b bytes each.
 */
static inline size_t
arm_sme_z_size(uint16_t svl_b)
{
	return (size_t)svl_b * 32;
}
1153
/*
 * Bytes occupied by the P (predicate) register file: 2 * svl_b bytes total
 * (16 predicate registers of svl_b/8 bytes each).
 */
static inline size_t
arm_sme_p_size(uint16_t svl_b)
{
	return (size_t)svl_b * 2;
}
1159
/*
 * Bytes occupied by the ZA storage: an svl_b x svl_b byte tile.
 *
 * Widen before multiplying: uint16_t promotes to int, and 65535 * 65535
 * overflows signed int, which is undefined behavior.
 */
static inline size_t
arm_sme_za_size(uint16_t svl_b)
{
	return (size_t)svl_b * svl_b;
}
1165
1166 static inline mach_msg_type_number_t
arm_sme_saved_state_count(uint16_t svl_b)1167 arm_sme_saved_state_count(uint16_t svl_b)
1168 {
1169 assert(svl_b % 16 == 0);
1170 size_t size = sizeof(arm_sme_saved_state_t) +
1171 arm_sme_z_size(svl_b) +
1172 arm_sme_p_size(svl_b) +
1173 arm_sme_za_size(svl_b);
1174 return (mach_msg_type_number_t)(size / sizeof(unsigned int));
1175 }
1176
1177 static inline uint8_t *
arm_sme_z(arm_sme_context_t * ss)1178 arm_sme_z(arm_sme_context_t *ss)
1179 {
1180 return ss->__z_p_za;
1181 }
1182
1183 static inline const uint8_t *
const_arm_sme_z(const arm_sme_context_t * ss)1184 const_arm_sme_z(const arm_sme_context_t *ss)
1185 {
1186 return ss->__z_p_za;
1187 }
1188
1189 static inline uint8_t *
arm_sme_p(arm_sme_context_t * ss,uint16_t svl_b)1190 arm_sme_p(arm_sme_context_t *ss, uint16_t svl_b)
1191 {
1192 return ss->__z_p_za + arm_sme_z_size(svl_b);
1193 }
1194
1195 static inline const uint8_t *
const_arm_sme_p(const arm_sme_context_t * ss,uint16_t svl_b)1196 const_arm_sme_p(const arm_sme_context_t *ss, uint16_t svl_b)
1197 {
1198 return ss->__z_p_za + arm_sme_z_size(svl_b);
1199 }
1200
1201 static inline uint8_t *
arm_sme_za(arm_sme_context_t * ss,uint16_t svl_b)1202 arm_sme_za(arm_sme_context_t *ss, uint16_t svl_b)
1203 {
1204 return ss->__z_p_za + arm_sme_z_size(svl_b) + arm_sme_p_size(svl_b);
1205 }
1206
1207 static inline const uint8_t *
const_arm_sme_za(const arm_sme_context_t * ss,uint16_t svl_b)1208 const_arm_sme_za(const arm_sme_context_t *ss, uint16_t svl_b)
1209 {
1210 return ss->__z_p_za + arm_sme_z_size(svl_b) + arm_sme_p_size(svl_b);
1211 }
1212
1213 #endif /* !__has_ptrcheck */
1214 #endif /* HAS_ARM_FEAT_SME */
1215
/*
 * Aggregated context
 */

/* Full thread context: integer saved state plus NEON state. */
struct arm_context {
	struct arm_saved_state ss;
	struct arm_neon_saved_state ns;
};
typedef struct arm_context arm_context_t;

/* Reduced context for kernel threads (kernel variants of both states). */
struct arm_kernel_context {
	struct arm_kernel_saved_state ss;
	struct arm_kernel_neon_saved_state ns;
};
typedef struct arm_kernel_context arm_kernel_context_t;

/* Conversions between the internal saved state and the exported
 * arm_thread_state64_t mach flavor. */
extern void saved_state_to_thread_state64(const arm_saved_state_t*, arm_thread_state64_t*);
extern void thread_state64_to_saved_state(const arm_thread_state64_t*, arm_saved_state_t*);
1234
1235 #else /* defined(__arm64__) */
1236 #error Unknown arch
1237 #endif /* defined(__arm64__) */
1238
1239 extern void saved_state_to_thread_state32(const arm_saved_state_t*, arm_thread_state32_t*);
1240 extern void thread_state32_to_saved_state(const arm_thread_state32_t*, arm_saved_state_t*);
1241
1242 #endif /* XNU_KERNEL_PRIVATE */
1243
1244 #endif /* defined (__arm__) || defined (__arm64__) */
1245
1246 #endif /* _ARM_THREAD_STATUS_H_ */
1247