1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * FILE_ID: thread_status.h
30 */
31
32
33 #ifndef _ARM_THREAD_STATUS_H_
34 #define _ARM_THREAD_STATUS_H_
35
36 #if defined (__arm__) || defined (__arm64__)
37
38 #include <mach/machine/_structs.h>
39 #include <mach/machine/thread_state.h>
40 #include <mach/message.h>
41 #include <mach/vm_types.h>
42
43 #ifdef XNU_KERNEL_PRIVATE
44 #include <os/refcnt.h>
45 #endif
46
47 /*
48 * Support for determining the state of a thread
49 */
50
51
52 /*
53 * Flavors
54 */
55
/*
 * Thread state flavor identifiers.  These values are part of the Mach ABI
 * (exchanged with userland via thread_get_state()/thread_set_state()) and
 * must not be renumbered; gaps correspond to retired or private flavors.
 */
#define ARM_THREAD_STATE 1
#define ARM_UNIFIED_THREAD_STATE ARM_THREAD_STATE
#define ARM_VFP_STATE 2
#define ARM_EXCEPTION_STATE 3
#define ARM_DEBUG_STATE 4 /* pre-armv8 */
#define THREAD_STATE_NONE 5
#define ARM_THREAD_STATE64 6
#define ARM_EXCEPTION_STATE64 7
// ARM_THREAD_STATE_LAST 8 /* legacy */
#define ARM_THREAD_STATE32 9
#define ARM_EXCEPTION_STATE64_V2 10

#ifdef XNU_KERNEL_PRIVATE
#define X86_THREAD_STATE_NONE 13 /* i386/thread_status.h THREAD_STATE_NONE */
#endif /* XNU_KERNEL_PRIVATE */

/* API */
#define ARM_DEBUG_STATE32 14
#define ARM_DEBUG_STATE64 15
#define ARM_NEON_STATE 16
#define ARM_NEON_STATE64 17
#define ARM_CPMU_STATE64 18

#ifdef XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SAVED_STATE32 20
#define ARM_SAVED_STATE64 21
#define ARM_NEON_SAVED_STATE32 22
#define ARM_NEON_SAVED_STATE64 23
#endif /* XNU_KERNEL_PRIVATE */

#define ARM_PAGEIN_STATE 27

#if XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SME_SAVED_STATE 28
#endif /* XNU_KERNEL_PRIVATE */

#define THREAD_STATE_FLAVORS 29 /* This must be updated to 1 more than the highest numerical state flavor */

/* Hook for machine-dependent code to declare additional flavors valid. */
#ifndef ARM_STATE_FLAVOR_IS_OTHER_VALID
#define ARM_STATE_FLAVOR_IS_OTHER_VALID(_flavor_) 0
#endif

/*
 * True for the flavors that carry core CPU registers (pc/lr/sp/cpsr/GPRs).
 * Arguments are fully parenthesized so the macro is safe for any expression.
 */
#define FLAVOR_MODIFIES_CORE_CPU_REGISTERS(x) \
	(((x) == ARM_THREAD_STATE) ||   \
	((x) == ARM_THREAD_STATE32) ||  \
	((x) == ARM_THREAD_STATE64))

/* True for the flavors accepted by thread_get_state()/thread_set_state(). */
#define VALID_THREAD_STATE_FLAVOR(x) \
	(((x) == ARM_THREAD_STATE) ||           \
	((x) == ARM_VFP_STATE) ||               \
	((x) == ARM_EXCEPTION_STATE) ||         \
	((x) == ARM_DEBUG_STATE) ||             \
	((x) == THREAD_STATE_NONE) ||           \
	((x) == ARM_THREAD_STATE32) ||          \
	((x) == ARM_THREAD_STATE64) ||          \
	((x) == ARM_EXCEPTION_STATE64) ||       \
	((x) == ARM_EXCEPTION_STATE64_V2) ||    \
	((x) == ARM_NEON_STATE) ||              \
	((x) == ARM_NEON_STATE64) ||            \
	((x) == ARM_DEBUG_STATE32) ||           \
	((x) == ARM_DEBUG_STATE64) ||           \
	((x) == ARM_PAGEIN_STATE) ||            \
	(ARM_STATE_FLAVOR_IS_OTHER_VALID((x))))
121
/*
 * Common header for the unified/variable-layout states below: identifies
 * which union member is live and how large it is.
 */
struct arm_state_hdr {
	uint32_t flavor; /* an ARM_*_STATE* flavor constant selecting the layout */
	uint32_t count;  /* state size; matches the *_COUNT macros (uint32_t words) */
};
typedef struct arm_state_hdr arm_state_hdr_t;

/* Userland-visible thread state layouts (defined in mach/arm/_structs.h). */
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state_t;
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state32_t;
typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t;
131
#if !defined(KERNEL)
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__)

/*
 * Accessor macros for arm_thread_state64_t pointer fields.
 *
 * These forward to the __darwin_* implementations so userland can read and
 * write pc/lr/sp/fp without depending on the underlying representation
 * (NOTE(review): on arm64e the underlying fields presumably carry pointer
 * authentication bits — see the __darwin_* definitions).
 */

/* Return pc field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_pc(ts) \
	__darwin_arm_thread_state64_get_pc(ts)
/* Return pc field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_pc() macro in that case. */
#define arm_thread_state64_get_pc_fptr(ts) \
	__darwin_arm_thread_state64_get_pc_fptr(ts)
/* Set pc field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_pc_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_pc_fptr(ts, fptr)
/* Set pc field of arm_thread_state64_t to an already signed function pointer */
#define arm_thread_state64_set_pc_presigned_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_pc_presigned_fptr(ts, fptr)
/* Return lr field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_lr(ts) \
	__darwin_arm_thread_state64_get_lr(ts)
/* Return lr field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_lr() macro in that case. */
#define arm_thread_state64_get_lr_fptr(ts) \
	__darwin_arm_thread_state64_get_lr_fptr(ts)
/* Set lr field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_lr_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_lr_fptr(ts, fptr)
/* Set lr field of arm_thread_state64_t to an already signed function pointer */
#define arm_thread_state64_set_lr_presigned_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_lr_presigned_fptr(ts, fptr)
/* Return sp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_sp(ts) \
	__darwin_arm_thread_state64_get_sp(ts)
/* Set sp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_sp(ts, ptr) \
	__darwin_arm_thread_state64_set_sp(ts, ptr)
/* Return fp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_fp(ts) \
	__darwin_arm_thread_state64_get_fp(ts)
/* Set fp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_fp(ts, ptr) \
	__darwin_arm_thread_state64_set_fp(ts, ptr)
/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */
#define arm_thread_state64_ptrauth_strip(ts) \
	__darwin_arm_thread_state64_ptrauth_strip(ts)

#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */
#endif /* !defined(KERNEL) */
183
/*
 * Thread state wrapper that can carry either a 32-bit or a 64-bit thread
 * state; ash.flavor (ARM_THREAD_STATE32 / ARM_THREAD_STATE64) selects the
 * live union member (see is_thread_state32()/is_thread_state64() below).
 */
struct arm_unified_thread_state {
	arm_state_hdr_t ash; /* flavor/count header */
	union {
		arm_thread_state32_t ts_32;
		arm_thread_state64_t ts_64;
	} uts;
};
/* Shorthand so code can write state->ts_32 / state->ts_64 directly. */
#define ts_32 uts.ts_32
#define ts_64 uts.ts_64
typedef struct arm_unified_thread_state arm_unified_thread_state_t;

/* State sizes, expressed as counts of uint32_t words (mach convention). */
#define ARM_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state32_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state64_t)/sizeof(uint32_t)))
#define ARM_UNIFIED_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_unified_thread_state_t)/sizeof(uint32_t)))
203
204
/* Floating-point / SIMD state layouts. */
typedef _STRUCT_ARM_VFP_STATE arm_vfp_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state32_t;
typedef _STRUCT_ARM_NEON_STATE64 arm_neon_state64_t;


/* Exception state layouts. */
typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state_t;
typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state32_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64_V2 arm_exception_state64_v2_t;

/* Debug (hardware breakpoint/watchpoint) state layouts. */
typedef _STRUCT_ARM_DEBUG_STATE32 arm_debug_state32_t;
typedef _STRUCT_ARM_DEBUG_STATE64 arm_debug_state64_t;

typedef _STRUCT_ARM_PAGEIN_STATE arm_pagein_state_t;

#if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
/* See below for ARM64 kernel structure definition for arm_debug_state. */
#else /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
/*
 * Otherwise not ARM64 kernel and we must preserve legacy ARM definitions of
 * arm_debug_state for binary compatibility of userland consumers of this file.
 */
#if defined(__arm__)
typedef _STRUCT_ARM_DEBUG_STATE arm_debug_state_t;
#elif defined(__arm64__)
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t;
#else /* defined(__arm__) */
#error Undefined architecture
#endif /* defined(__arm__) */
#endif /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
236
/*
 * Per-flavor state sizes as mach_msg_type_number_t word (uint32_t) counts,
 * as passed to / returned from thread_get_state() and thread_set_state().
 */
#define ARM_VFP_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfp_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_V2_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_v2_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state32_t)/sizeof(uint32_t)))

#define ARM_PAGEIN_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_pagein_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state64_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state64_t)/sizeof(uint32_t)))

/* The machine-independent "default" flavor for this architecture. */
#define MACHINE_THREAD_STATE ARM_THREAD_STATE
#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT


/*
 * Largest state on this machine:
 */
#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX
275
#ifdef XNU_KERNEL_PRIVATE

/*
 * Configuration gates for the privileged saved-state mutators defined later
 * in this header.  Each HAS_* macro compiles in the corresponding
 * set_/add_/mask_ accessor; they are only enabled for the kernel
 * configurations that actually need them.
 */
#if CONFIG_DTRACE
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_LR 1
#define HAS_SET_SAVED_STATE_REG 1
#define HAS_MASK_SAVED_STATE_CPSR 1
#endif /* CONFIG_DTRACE */

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
#define HAS_SET_SAVED_STATE_CPSR 1
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

#if CONFIG_XNUPOST
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_CPSR 1
#endif /* CONFIG_XNUPOST */

#if DEBUG || DEVELOPMENT
#define HAS_ADD_SAVED_STATE_PC 1
#endif

#if CONFIG_BTI_TELEMETRY
/* BTI Telemetry needs CPSR to recover from BTI exceptions */
#define HAS_SET_SAVED_STATE_CPSR 1
#endif /* CONFIG_BTI_TELEMETRY */
304
305
306 static inline boolean_t
is_thread_state32(const arm_unified_thread_state_t * its)307 is_thread_state32(const arm_unified_thread_state_t *its)
308 {
309 return its->ash.flavor == ARM_THREAD_STATE32;
310 }
311
312 static inline boolean_t
is_thread_state64(const arm_unified_thread_state_t * its)313 is_thread_state64(const arm_unified_thread_state_t *its)
314 {
315 return its->ash.flavor == ARM_THREAD_STATE64;
316 }
317
318 static inline arm_thread_state32_t*
thread_state32(arm_unified_thread_state_t * its)319 thread_state32(arm_unified_thread_state_t *its)
320 {
321 return &its->ts_32;
322 }
323
324 static inline arm_thread_state64_t*
thread_state64(arm_unified_thread_state_t * its)325 thread_state64(arm_unified_thread_state_t *its)
326 {
327 return &its->ts_64;
328 }
329
330 static inline const arm_thread_state32_t*
const_thread_state32(const arm_unified_thread_state_t * its)331 const_thread_state32(const arm_unified_thread_state_t *its)
332 {
333 return &its->ts_32;
334 }
335
336 static inline const arm_thread_state64_t*
const_thread_state64(const arm_unified_thread_state_t * its)337 const_thread_state64(const arm_unified_thread_state_t *its)
338 {
339 return &its->ts_64;
340 }
341
342 #if defined(__arm64__)
343
344 #include <kern/assert.h>
345 #include <arm64/proc_reg.h>
/* Cast 'val' to 'type', asserting that the value survives the round trip (no truncation). */
#define CAST_ASSERT_SAFE(type, val) (assert((val) == ((type)(val))), (type)(val))
347
348 /*
349 * GPR context
350 */
351
/* AArch32 GPR and exception-syndrome context. */
struct arm_saved_state32 {
	uint32_t r[13]; /* General purpose register r0-r12 */
	uint32_t sp; /* Stack pointer r13 */
	uint32_t lr; /* Link register r14 */
	uint32_t pc; /* Program counter r15 */
	uint32_t cpsr; /* Current program status register */
	uint32_t far; /* Virtual fault address */
	uint32_t esr; /* Exception syndrome register */
	uint32_t exception; /* Exception number */
};
typedef struct arm_saved_state32 arm_saved_state32_t;

/* arm_saved_state32 preceded by a tag word. */
struct arm_saved_state32_tagged {
	uint32_t tag;
	struct arm_saved_state32 state;
};
typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t;

/* Size of arm_saved_state32 in 32-bit words. */
#define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state32_t)/sizeof(unsigned int)))
372
/* AArch64 GPR and exception-syndrome context. */
struct arm_saved_state64 {
	uint64_t x[29]; /* General purpose registers x0-x28 */
	uint64_t fp; /* Frame pointer x29 */
	uint64_t lr; /* Link register x30 */
	uint64_t sp; /* Stack pointer x31 */
	uint64_t pc; /* Program counter */
	uint32_t cpsr; /* Current program status register */
	uint32_t reserved; /* Reserved padding */
	uint64_t far; /* Virtual fault address */
	uint64_t esr; /* Exception syndrome register */
#if HAS_APPLE_PAC
	uint64_t jophash; /* thread-state signature; see ml_sign_thread_state() */
#endif /* HAS_APPLE_PAC */
};
typedef struct arm_saved_state64 arm_saved_state64_t;

/* Size of arm_saved_state64 in 32-bit words. */
#define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state64_t)/sizeof(unsigned int)))

/* arm_saved_state64 preceded by a tag word. */
struct arm_saved_state64_tagged {
	uint32_t tag;
	struct arm_saved_state64 state;
};
typedef struct arm_saved_state64_tagged arm_saved_state64_tagged_t;
397
/*
 * Unified in-kernel saved state; ash.flavor (ARM_SAVED_STATE32 /
 * ARM_SAVED_STATE64) selects the live union member — see
 * is_saved_state32()/is_saved_state64() below.
 */
struct arm_saved_state {
	arm_state_hdr_t ash; /* flavor/count header */
	union {
		struct arm_saved_state32 ss_32;
		struct arm_saved_state64 ss_64;
	} uss;
} __attribute__((aligned(16)));
/* Shorthand so code can write state->ss_32 / state->ss_64 directly. */
#define ss_32 uss.ss_32
#define ss_64 uss.ss_64

typedef struct arm_saved_state arm_saved_state_t;
409
/*
 * Compact register context holding only the AAPCS64 callee-saved GPRs
 * (x19-x28) plus fp, lr, sp, and the few bits of processor state that must
 * survive a switch.
 */
struct arm_kernel_saved_state {
	uint64_t x[10]; /* General purpose registers x19-x28 */
	uint64_t fp; /* Frame pointer x29 */
	uint64_t lr; /* Link register x30 */
	uint64_t sp; /* Stack pointer x31 */
	/* Some things here we DO need to preserve */
	uint8_t pc_was_in_userspace;
	/* NOTE(review): the next three presumably mirror PSTATE.{SSBS,DIT,UAO} — confirm with the context-switch code. */
	uint8_t ssbs;
	uint8_t dit;
	uint8_t uao;
} __attribute__((aligned(16)));

typedef struct arm_kernel_saved_state arm_kernel_saved_state_t;
423
424 extern void ml_panic_on_invalid_old_cpsr(const arm_saved_state_t *) __attribute__((noreturn));
425
426 extern void ml_panic_on_invalid_new_cpsr(const arm_saved_state_t *, uint32_t) __attribute__((noreturn));
427
428 #if HAS_APPLE_PAC
429
430 #include <sys/cdefs.h>
431
432 /*
433 * Used by MANIPULATE_SIGNED_THREAD_STATE(), potentially from C++ (IOKit) code.
434 * Open-coded to prevent a circular dependency between mach/arm/thread_status.h
435 * and osfmk/arm/machine_routines.h.
436 */
437 __BEGIN_DECLS
438 extern uint64_t ml_pac_safe_interrupts_disable(void);
439 extern void ml_pac_safe_interrupts_restore(uint64_t);
440 __END_DECLS
441
442 /*
443 * Methods used to sign and check thread state to detect corruptions of saved
444 * thread state across exceptions and context switches.
445 */
446 extern void ml_sign_thread_state(arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);
447
448 extern void ml_check_signed_state(const arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);
449
/* XXX: including stddef.h here breaks ctfmerge on some builds, so use __builtin_offsetof() instead of offsetof() */
/* Byte offset of a field within the 64-bit member of struct arm_saved_state. */
#define ss64_offsetof(x) __builtin_offsetof(struct arm_saved_state, ss_64.x)
452
453 /**
454 * Verify the signed thread state in _iss, execute the assembly instructions
455 * _instr, and re-sign the modified thread state. Varargs specify additional
456 * inputs.
457 *
458 * _instr may read or modify the thread state in the following registers:
459 *
460 * x0: _iss
461 * x1: authed _iss->ss_64.pc
462 * w2: authed _iss->ss_64.cpsr
463 * x3: authed _iss->ss_64.lr
464 * x4: authed _iss->ss_64.x16
465 * x5: authed _iss->ss_64.x17
466 * x6: scratch register
467 * x7: scratch register
468 *
469 * If _instr makes no changes to the thread state, it may skip re-signing by
470 * branching to label 0.
471 */
472 #define MANIPULATE_SIGNED_THREAD_STATE(_iss, _instr, ...) \
473 do { \
474 uint64_t _intr = ml_pac_safe_interrupts_disable(); \
475 asm volatile ( \
476 "mov x9, lr" "\n" \
477 "mov x0, %[iss]" "\n" \
478 "msr SPSel, #1" "\n" \
479 "ldp x4, x5, [x0, %[SS64_X16]]" "\n" \
480 "ldr x7, [x0, %[SS64_PC]]" "\n" \
481 "ldr w8, [x0, %[SS64_CPSR]]" "\n" \
482 "ldr x3, [x0, %[SS64_LR]]" "\n" \
483 "mov x1, x7" "\n" \
484 "mov w2, w8" "\n" \
485 "bl _ml_check_signed_state" "\n" \
486 "mov x1, x7" "\n" \
487 "mov w2, w8" "\n" \
488 _instr "\n" \
489 "bl _ml_sign_thread_state" "\n" \
490 "0:" "\n" \
491 "msr SPSel, #0" "\n" \
492 "mov lr, x9" "\n" \
493 : \
494 : [iss] "r"(_iss), \
495 [SS64_X16] "i"(ss64_offsetof(x[16])), \
496 [SS64_PC] "i"(ss64_offsetof(pc)), \
497 [SS64_CPSR] "i"(ss64_offsetof(cpsr)), \
498 [SS64_LR] "i"(ss64_offsetof(lr)),##__VA_ARGS__ \
499 : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", \
500 "x9", "x16" \
501 ); \
502 ml_pac_safe_interrupts_restore(_intr); \
503 } while (0)
504
/*
 * Assembly fragment prepended by MANIPULATE_SIGNED_USER_THREAD_STATE: checks
 * that the authed CPSR (w2) selects EL0 and otherwise panics via
 * ml_panic_on_invalid_old_cpsr (declared noreturn above; the brk only guards
 * against an impossible return).
 */
#define VERIFY_USER_THREAD_STATE_INSTR \
	"and w6, w2, %[CPSR_EL_MASK]" "\n" \
	"cmp w6, %[CPSR_EL0]" "\n" \
	"b.eq 1f" "\n" \
	"bl _ml_panic_on_invalid_old_cpsr" "\n" \
	"brk #0" "\n" \
	"1:" "\n"

/* Extra asm inputs consumed by VERIFY_USER_THREAD_STATE_INSTR. */
#define VERIFY_USER_THREAD_STATE_INPUTS \
	[CPSR_EL_MASK] "i"(PSR64_MODE_EL_MASK), \
	[CPSR_EL0] "i"(PSR64_MODE_EL0)

/*
 * MANIPULATE_SIGNED_THREAD_STATE, but first verifies the saved state is
 * user (EL0) state before running _instr.
 */
#define MANIPULATE_SIGNED_USER_THREAD_STATE(_iss, _instr, ...) \
	MANIPULATE_SIGNED_THREAD_STATE(_iss, \
	VERIFY_USER_THREAD_STATE_INSTR \
	_instr, \
	VERIFY_USER_THREAD_STATE_INPUTS, ##__VA_ARGS__)
522
/*
 * Validate the signed user (EL0) thread state in 'src', then re-sign it for
 * 'dst': the injected instruction sets x0 = dst before the macro's
 * ml_sign_thread_state call, so the new signature is bound to the
 * destination state (NOTE(review): see ml_sign_thread_state for exactly
 * which fields the signature covers).
 */
static inline void
check_and_sign_copied_user_thread_state(arm_saved_state_t *dst, const arm_saved_state_t *src)
{
	MANIPULATE_SIGNED_USER_THREAD_STATE(src,
	    "mov	x0, %[dst]",
	    [dst] "r"(dst)
	    );
}
531 #endif /* HAS_APPLE_PAC */
532
533 static inline boolean_t
is_saved_state32(const arm_saved_state_t * iss)534 is_saved_state32(const arm_saved_state_t *iss)
535 {
536 return iss->ash.flavor == ARM_SAVED_STATE32;
537 }
538
539 static inline boolean_t
is_saved_state64(const arm_saved_state_t * iss)540 is_saved_state64(const arm_saved_state_t *iss)
541 {
542 return iss->ash.flavor == ARM_SAVED_STATE64;
543 }
544
545 static inline arm_saved_state32_t*
saved_state32(arm_saved_state_t * iss)546 saved_state32(arm_saved_state_t *iss)
547 {
548 return &iss->ss_32;
549 }
550
551 static inline const arm_saved_state32_t*
const_saved_state32(const arm_saved_state_t * iss)552 const_saved_state32(const arm_saved_state_t *iss)
553 {
554 return &iss->ss_32;
555 }
556
557 static inline arm_saved_state64_t*
saved_state64(arm_saved_state_t * iss)558 saved_state64(arm_saved_state_t *iss)
559 {
560 return &iss->ss_64;
561 }
562
563 static inline const arm_saved_state64_t*
const_saved_state64(const arm_saved_state_t * iss)564 const_saved_state64(const arm_saved_state_t *iss)
565 {
566 return &iss->ss_64;
567 }
568
569 static inline register_t
get_saved_state_pc(const arm_saved_state_t * iss)570 get_saved_state_pc(const arm_saved_state_t *iss)
571 {
572 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc);
573 }
574
#if HAS_ADD_SAVED_STATE_PC
/*
 * Advance (or rewind) the saved pc by 'diff' bytes.
 *
 * 32-bit: widened arithmetic with an assert that the result still fits in
 * 32 bits.  64-bit with PAC: done under MANIPULATE_SIGNED_THREAD_STATE so
 * the thread-state signature is recomputed with the new pc (x1 carries the
 * authed pc; 'diff' is sign-extended before the add).
 */
static inline void
add_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_ADD_SAVED_STATE_PC */
596
/*
 * Advance (or rewind) the saved pc of USER state by 'diff' bytes.
 * Like add_saved_state_pc(), but on PAC kernels the 64-bit path uses
 * MANIPULATE_SIGNED_USER_THREAD_STATE, which panics if the saved CPSR is
 * not EL0 before making the modification.
 */
static inline void
add_user_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
616
#if HAS_SET_SAVED_STATE_PC
/*
 * Overwrite the saved pc.  64-bit with PAC: performed under
 * MANIPULATE_SIGNED_THREAD_STATE so the thread-state signature is
 * recomputed with the new pc value (x1 is the register the signing routine
 * reads pc from).
 */
static inline void
set_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_PC */
636
/*
 * Overwrite the saved pc of USER state.  Like set_saved_state_pc(), but on
 * PAC kernels the 64-bit path verifies the saved CPSR is EL0 (panicking
 * otherwise) before re-signing.
 */
static inline void
set_user_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
654
655 static inline register_t
get_saved_state_sp(const arm_saved_state_t * iss)656 get_saved_state_sp(const arm_saved_state_t *iss)
657 {
658 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp);
659 }
660
661 static inline void
set_saved_state_sp(arm_saved_state_t * iss,register_t sp)662 set_saved_state_sp(arm_saved_state_t *iss, register_t sp)
663 {
664 if (is_saved_state32(iss)) {
665 saved_state32(iss)->sp = CAST_ASSERT_SAFE(uint32_t, sp);
666 } else {
667 saved_state64(iss)->sp = (uint64_t)sp;
668 }
669 }
670
671 static inline register_t
get_saved_state_lr(const arm_saved_state_t * iss)672 get_saved_state_lr(const arm_saved_state_t *iss)
673 {
674 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr);
675 }
676
#if HAS_SET_SAVED_STATE_LR
/*
 * Overwrite the saved link register.  64-bit with PAC: performed under
 * MANIPULATE_SIGNED_THREAD_STATE so the thread-state signature is
 * recomputed with the new lr (x3 is the register the signing routine reads
 * lr from).
 */
static inline void
set_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_LR */
696
/*
 * Overwrite the saved link register of USER state.  Like
 * set_saved_state_lr(), but on PAC kernels the 64-bit path verifies the
 * saved CPSR is EL0 (panicking otherwise) before re-signing.
 */
static inline void
set_user_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
714
715 static inline register_t
get_saved_state_fp(const arm_saved_state_t * iss)716 get_saved_state_fp(const arm_saved_state_t *iss)
717 {
718 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp);
719 }
720
721 static inline void
set_saved_state_fp(arm_saved_state_t * iss,register_t fp)722 set_saved_state_fp(arm_saved_state_t *iss, register_t fp)
723 {
724 if (is_saved_state32(iss)) {
725 saved_state32(iss)->r[7] = CAST_ASSERT_SAFE(uint32_t, fp);
726 } else {
727 saved_state64(iss)->fp = (uint64_t)fp;
728 }
729 }
730
/*
 * Bounds-check a register index for get_/set_saved_state_reg().
 *
 * NOTE(review): the limit is ARM_SAVED_STATE{32,64}_COUNT — the size of the
 * whole saved-state structure in 32-bit words — which is larger than the
 * number of architectural GPRs; callers appear to treat this only as a
 * conservative upper bound.  Confirm before tightening.
 */
static inline int
check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg)
{
	return is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT);
}
736
737 static inline register_t
get_saved_state_reg(const arm_saved_state_t * iss,unsigned reg)738 get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg)
739 {
740 if (!check_saved_state_reglimit(iss, reg)) {
741 return 0;
742 }
743
744 return (register_t)(is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]));
745 }
746
#if HAS_SET_SAVED_STATE_REG
/*
 * Overwrite GPR 'reg'; silently ignores an out-of-range index.
 * On PAC kernels x16 and x17 are folded into the thread-state signature
 * (jophash), so writes to them go through MANIPULATE_SIGNED_THREAD_STATE
 * to re-sign the state; all other registers are written directly.
 */
static inline void
set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov	x4, %[value]		\n"
			    "str	x4, [x0, %[SS64_X16]]	\n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov	x5, %[value]		\n"
			    "str	x5, [x0, %[SS64_X17]]	\n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
#endif /* HAS_SET_SAVED_STATE_REG */
781
/*
 * Overwrite GPR 'reg' of USER state; silently ignores an out-of-range index.
 * Like set_saved_state_reg(), but on PAC kernels the signed x16/x17 paths
 * additionally verify the saved CPSR is EL0 (panicking otherwise).
 */
static inline void
set_user_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x4, %[value]		\n"
			    "str	x4, [x0, %[SS64_X16]]	\n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x5, %[value]		\n"
			    "str	x5, [x0, %[SS64_X17]]	\n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
814
815
816 static inline uint32_t
get_saved_state_cpsr(const arm_saved_state_t * iss)817 get_saved_state_cpsr(const arm_saved_state_t *iss)
818 {
819 return is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr;
820 }
821
#if HAS_MASK_SAVED_STATE_CPSR
/*
 * Read-modify-write of the saved CPSR: OR in 'set_bits', then clear
 * 'clear_bits'.  64-bit with PAC: the update runs under
 * MANIPULATE_SIGNED_THREAD_STATE (w2 carries the authed CPSR) so the state
 * is re-signed with the new value.
 */
static inline void
mask_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr |= set_bits;
		saved_state32(iss)->cpsr &= ~clear_bits;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]	\n"
		    "orr	w2, w2, w6, lsl #0	\n"
		    "mov	w6, %w[clear_bits]	\n"
		    "bic	w2, w2, w6, lsl #0	\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		saved_state64(iss)->cpsr |= set_bits;
		saved_state64(iss)->cpsr &= ~clear_bits;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_MASK_SAVED_STATE_CPSR */
847
/*
 * Read-modify-write of the saved CPSR of USER state: OR in 'set_bits',
 * clear 'clear_bits', then panic (ml_panic_on_invalid_new_cpsr) if the
 * resulting CPSR would no longer execute at user mode / EL0.  The PAC path
 * performs the same EL0 check inside the signed-state asm before storing
 * and re-signing.
 */
static inline void
mask_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		uint32_t new_cpsr = saved_state32(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state32(iss)->cpsr = new_cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]	\n"
		    "orr	w2, w2, w6, lsl #0	\n"
		    "mov	w6, %w[clear_bits]	\n"
		    "bic	w2, w2, w6, lsl #0	\n"
		    "and	w6, w2, %[CPSR_EL_MASK]	\n"
		    "cmp	w6, %[CPSR_EL0]		\n"
		    "b.eq	1f			\n"
		    "mov	w1, w2			\n"
		    "bl		_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0			\n"
		    "1:					\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		uint32_t new_cpsr = saved_state64(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR64_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state64(iss)->cpsr = new_cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
888
#if HAS_SET_SAVED_STATE_CPSR
/*
 * Overwrite the saved CPSR.  64-bit with PAC: performed under
 * MANIPULATE_SIGNED_THREAD_STATE so the thread-state signature is
 * recomputed with the new value (w2 is the register the signing routine
 * reads CPSR from).  No user/EL0 validation — see the _user_ variant.
 */
static inline void
set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]		\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_CPSR */
908
/*
 * Overwrite the saved CPSR of USER state, panicking
 * (ml_panic_on_invalid_new_cpsr) if the new CPSR would not execute at user
 * mode / EL0.  The PAC path performs the EL0 check inside the signed-state
 * asm before storing and re-signing.
 */
static inline void
set_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		if (!PSR_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]		\n"
		    "and	w6, w2, %[CPSR_EL_MASK]	\n"
		    "cmp	w6, %[CPSR_EL0]		\n"
		    "b.eq	1f			\n"
		    "mov	w1, w2			\n"
		    "bl		_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0			\n"
		    "1:					\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		if (!PSR64_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
939
940 static inline register_t
get_saved_state_far(const arm_saved_state_t * iss)941 get_saved_state_far(const arm_saved_state_t *iss)
942 {
943 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far);
944 }
945
946 static inline void
set_saved_state_far(arm_saved_state_t * iss,register_t far)947 set_saved_state_far(arm_saved_state_t *iss, register_t far)
948 {
949 if (is_saved_state32(iss)) {
950 saved_state32(iss)->far = CAST_ASSERT_SAFE(uint32_t, far);
951 } else {
952 saved_state64(iss)->far = (uint64_t)far;
953 }
954 }
955
956 static inline uint64_t
get_saved_state_esr(const arm_saved_state_t * iss)957 get_saved_state_esr(const arm_saved_state_t *iss)
958 {
959 return is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr;
960 }
961
962 static inline void
set_saved_state_esr(arm_saved_state_t * iss,uint64_t esr)963 set_saved_state_esr(arm_saved_state_t *iss, uint64_t esr)
964 {
965 if (is_saved_state32(iss)) {
966 assert(esr < (uint64_t) (uint32_t) -1);
967 saved_state32(iss)->esr = (uint32_t) esr;
968 } else {
969 saved_state64(iss)->esr = esr;
970 }
971 }
972
973 extern void panic_unimplemented(void);
974
975 /**
976 * Extracts the SVC (Supervisor Call) number from the appropriate GPR (General
977 * Purpose Register).
978 *
979 * @param[in] iss the 32-bit or 64-bit ARM saved state (i.e. trap frame).
980 *
981 * @return The SVC number.
982 */
983 static inline int
get_saved_state_svc_number(const arm_saved_state_t * iss)984 get_saved_state_svc_number(const arm_saved_state_t *iss)
985 {
986 return is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]; /* Only first word counts here */
987 }
988
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t;

/*
 * Reference-counted aggregate of the 32-bit and 64-bit debug register
 * state.  Only one union member is meaningful at a time; the dsh header
 * records which (NOTE(review): arm_state_hdr_t layout not visible here —
 * presumably carries a flavor tag, as nsh.flavor does for NEON below).
 */
struct arm_debug_aggregate_state {
	arm_state_hdr_t dsh;            /* state header */
	union {
		arm_debug_state32_t ds32;   /* AArch32 debug registers */
		arm_debug_state64_t ds64;   /* AArch64 debug registers */
	} uds;
	os_refcnt_t ref;                /* reference count (XNU_KERNEL_PRIVATE) */
} __attribute__((aligned(16)));

typedef struct arm_debug_aggregate_state arm_debug_state_t;

/* Size of arm_legacy_debug_state_t in 32-bit words, for Mach msg counts. */
#define ARM_LEGACY_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_legacy_debug_state_t)/sizeof(uint32_t)))
1004
/*
 * NEON context
 */

/* 128-bit integer, sized to hold one full NEON Q register. */
typedef __uint128_t uint128_t;
typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2)));
typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4)));

/*
 * AArch32 NEON register file plus floating-point status/control.  The
 * union provides Q-, D-, and S-register views of the same storage
 * (sized by q[16] = 256 bytes, the largest member).
 */
struct arm_neon_saved_state32 {
	union {
		uint128_t q[16];
		uint64_t d[32];
		uint32_t s[32];
	} v;
	uint32_t fpsr;
	uint32_t fpcr;
};
typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t;

/* Size of arm_neon_saved_state32_t in 32-bit words, for Mach msg counts. */
#define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int)))
1025
/*
 * AArch64 SIMD register file: 32 vector registers, each 128 bits,
 * viewable as a uint128_t, a 2-lane u64 vector, or a 4-lane u32 vector,
 * plus FPSR/FPCR.
 */
struct arm_neon_saved_state64 {
	union {
		uint128_t q[32];
		uint64x2_t d[32];
		uint32x4_t s[32];
	} v;
	uint32_t fpsr;
	uint32_t fpcr;
};
typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t;

/* Size of arm_neon_saved_state64_t in 32-bit words, for Mach msg counts. */
#define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int)))

/*
 * Flavored aggregate of the two NEON layouts; nsh.flavor selects which
 * union member is valid (see is_neon_saved_state32/64 below).
 */
struct arm_neon_saved_state {
	arm_state_hdr_t nsh;
	union {
		struct arm_neon_saved_state32 ns_32;
		struct arm_neon_saved_state64 ns_64;
	} uns;
};
typedef struct arm_neon_saved_state arm_neon_saved_state_t;
/* Shorthand accessors for the union members. */
#define ns_32 uns.ns_32
#define ns_64 uns.ns_64
1050
/*
 * Reduced NEON state kept for kernel threads: eight D registers and FPCR
 * only, no FPSR.  NOTE(review): presumably the AAPCS64 callee-saved set
 * d8-d15 — confirm against the context-switch code that fills this in.
 */
struct arm_kernel_neon_saved_state {
	uint64_t d[8];
	uint32_t fpcr;
};
typedef struct arm_kernel_neon_saved_state arm_kernel_neon_saved_state_t;
1056
1057 static inline boolean_t
is_neon_saved_state32(const arm_neon_saved_state_t * state)1058 is_neon_saved_state32(const arm_neon_saved_state_t *state)
1059 {
1060 return state->nsh.flavor == ARM_NEON_SAVED_STATE32;
1061 }
1062
1063 static inline boolean_t
is_neon_saved_state64(const arm_neon_saved_state_t * state)1064 is_neon_saved_state64(const arm_neon_saved_state_t *state)
1065 {
1066 return state->nsh.flavor == ARM_NEON_SAVED_STATE64;
1067 }
1068
1069 static inline arm_neon_saved_state32_t *
neon_state32(arm_neon_saved_state_t * state)1070 neon_state32(arm_neon_saved_state_t *state)
1071 {
1072 return &state->ns_32;
1073 }
1074
1075 static inline arm_neon_saved_state64_t *
neon_state64(arm_neon_saved_state_t * state)1076 neon_state64(arm_neon_saved_state_t *state)
1077 {
1078 return &state->ns_64;
1079 }
1080
1081
#if HAS_ARM_FEAT_SME


struct arm_sme_saved_state;
typedef struct arm_sme_saved_state arm_sme_saved_state_t;

#if !__has_ptrcheck
/*
 * SME register context.  The Z, P, and ZA register contents scale with
 * the streaming vector length, so they are stored back-to-back in a
 * flexible array member; use the arm_sme_z/p/za accessors below rather
 * than indexing __z_p_za directly.
 */
typedef struct {
	uint8_t zt0[64];     /* ZT0 register, fixed 64 bytes */
	uint8_t __z_p_za[];  /* Z block, then P block, then ZA, sized by svl_b */
} arm_sme_context_t;

struct arm_sme_saved_state {
	arm_state_hdr_t hdr;
	uint64_t svcr;           /* streaming vector control register */
	uint16_t svl_b;          /* streaming vector length in bytes */
	arm_sme_context_t context;
};
1100
/*
 * Size in bytes of the Z vector register block for a given streaming
 * vector length: 32 registers of svl_b bytes each.
 */
static inline size_t
arm_sme_z_size(uint16_t svl_b)
{
	const size_t num_z_regs = 32;
	return num_z_regs * svl_b;
}
1106
/*
 * Size in bytes of the P predicate register block: the 16 predicate
 * registers together occupy 2 * svl_b bytes (each is svl_b / 8 bytes).
 */
static inline size_t
arm_sme_p_size(uint16_t svl_b)
{
	const size_t p_block_factor = 2;
	return p_block_factor * svl_b;
}
1112
/*
 * Size in bytes of the ZA array storage: an svl_b x svl_b byte tile.
 *
 * Fix: widen before multiplying.  uint16_t promotes to int, so for
 * svl_b > 46340 the product svl_b * svl_b would overflow signed int,
 * which is undefined behavior; do the multiply in size_t instead.
 */
static inline size_t
arm_sme_za_size(uint16_t svl_b)
{
	return (size_t)svl_b * svl_b;
}
1118
1119 static inline mach_msg_type_number_t
arm_sme_saved_state_count(uint16_t svl_b)1120 arm_sme_saved_state_count(uint16_t svl_b)
1121 {
1122 assert(svl_b % 16 == 0);
1123 size_t size = sizeof(arm_sme_saved_state_t) +
1124 arm_sme_z_size(svl_b) +
1125 arm_sme_p_size(svl_b) +
1126 arm_sme_za_size(svl_b);
1127 return (mach_msg_type_number_t)(size / sizeof(unsigned int));
1128 }
1129
1130 static inline uint8_t *
arm_sme_z(arm_sme_context_t * ss)1131 arm_sme_z(arm_sme_context_t *ss)
1132 {
1133 return ss->__z_p_za;
1134 }
1135
1136 static inline const uint8_t *
const_arm_sme_z(const arm_sme_context_t * ss)1137 const_arm_sme_z(const arm_sme_context_t *ss)
1138 {
1139 return ss->__z_p_za;
1140 }
1141
1142 static inline uint8_t *
arm_sme_p(arm_sme_context_t * ss,uint16_t svl_b)1143 arm_sme_p(arm_sme_context_t *ss, uint16_t svl_b)
1144 {
1145 return ss->__z_p_za + arm_sme_z_size(svl_b);
1146 }
1147
1148 static inline const uint8_t *
const_arm_sme_p(const arm_sme_context_t * ss,uint16_t svl_b)1149 const_arm_sme_p(const arm_sme_context_t *ss, uint16_t svl_b)
1150 {
1151 return ss->__z_p_za + arm_sme_z_size(svl_b);
1152 }
1153
1154 static inline uint8_t *
arm_sme_za(arm_sme_context_t * ss,uint16_t svl_b)1155 arm_sme_za(arm_sme_context_t *ss, uint16_t svl_b)
1156 {
1157 return ss->__z_p_za + arm_sme_z_size(svl_b) + arm_sme_p_size(svl_b);
1158 }
1159
1160 static inline const uint8_t *
const_arm_sme_za(const arm_sme_context_t * ss,uint16_t svl_b)1161 const_arm_sme_za(const arm_sme_context_t *ss, uint16_t svl_b)
1162 {
1163 return ss->__z_p_za + arm_sme_z_size(svl_b) + arm_sme_p_size(svl_b);
1164 }
1165
1166 #endif /* !__has_ptrcheck */
1167 #endif /* HAS_ARM_FEAT_SME */
1168
/*
 * Aggregated context
 */

/* Full thread context: general-purpose saved state plus NEON state. */
struct arm_context {
	struct arm_saved_state ss;
	struct arm_neon_saved_state ns;
};
typedef struct arm_context arm_context_t;

/* Reduced context for kernel threads (see arm_kernel_neon_saved_state). */
struct arm_kernel_context {
	struct arm_kernel_saved_state ss;
	struct arm_kernel_neon_saved_state ns;
};
typedef struct arm_kernel_context arm_kernel_context_t;
1184
/* Conversions between a saved state and the exported 64-bit Mach thread state. */
extern void saved_state_to_thread_state64(const arm_saved_state_t*, arm_thread_state64_t*);
extern void thread_state64_to_saved_state(const arm_thread_state64_t*, arm_saved_state_t*);
1187
1188 #else /* defined(__arm64__) */
1189 #error Unknown arch
1190 #endif /* defined(__arm64__) */
1191
/* Conversions between a saved state and the exported 32-bit Mach thread state. */
extern void saved_state_to_thread_state32(const arm_saved_state_t*, arm_thread_state32_t*);
extern void thread_state32_to_saved_state(const arm_thread_state32_t*, arm_saved_state_t*);
1194
1195 #endif /* XNU_KERNEL_PRIVATE */
1196
1197 #endif /* defined (__arm__) || defined (__arm64__) */
1198
1199 #endif /* _ARM_THREAD_STATUS_H_ */
1200