1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * FILE_ID: thread_status.h
30 */
31
32
33 #ifndef _ARM_THREAD_STATUS_H_
34 #define _ARM_THREAD_STATUS_H_
35
36 #if defined (__arm__) || defined (__arm64__)
37
38 #include <mach/machine/_structs.h>
39 #include <mach/machine/thread_state.h>
40 #include <mach/message.h>
41 #include <mach/vm_types.h>
42
43 #ifdef XNU_KERNEL_PRIVATE
44 #include <os/refcnt.h>
45 #endif
46
47 /*
48 * Support for determining the state of a thread
49 */
50
51
/*
 * Flavors
 *
 * Thread-state flavor identifiers, as passed to thread_get_state() /
 * thread_set_state().  The numeric values are ABI and must not change.
 */

#define ARM_THREAD_STATE 1
#define ARM_UNIFIED_THREAD_STATE ARM_THREAD_STATE
#define ARM_VFP_STATE 2
#define ARM_EXCEPTION_STATE 3
#define ARM_DEBUG_STATE 4 /* pre-armv8 */
#define THREAD_STATE_NONE 5
#define ARM_THREAD_STATE64 6
#define ARM_EXCEPTION_STATE64 7
// ARM_THREAD_STATE_LAST 8 /* legacy */
#define ARM_THREAD_STATE32 9

#ifdef XNU_KERNEL_PRIVATE
#define X86_THREAD_STATE_NONE 13 /* i386/thread_status.h THREAD_STATE_NONE */
#endif /* XNU_KERNEL_PRIVATE */

/* API */
#define ARM_DEBUG_STATE32 14
#define ARM_DEBUG_STATE64 15
#define ARM_NEON_STATE 16
#define ARM_NEON_STATE64 17
#define ARM_CPMU_STATE64 18

#ifdef XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SAVED_STATE32 20
#define ARM_SAVED_STATE64 21
#define ARM_NEON_SAVED_STATE32 22
#define ARM_NEON_SAVED_STATE64 23
#endif /* XNU_KERNEL_PRIVATE */

#define ARM_PAGEIN_STATE 27


/* Hook for platform-specific additional flavors; the default accepts none. */
#ifndef ARM_STATE_FLAVOR_IS_OTHER_VALID
#define ARM_STATE_FLAVOR_IS_OTHER_VALID(_flavor_) 0
#endif
92
/*
 * TRUE for flavors that rewrite the thread's core GPR file (pc/sp/lr/...).
 * The parameter is fully parenthesized so that expression arguments such as
 * `a | b` expand correctly (`==` binds tighter than `|`).
 */
#define FLAVOR_MODIFIES_CORE_CPU_REGISTERS(x) \
	(((x) == ARM_THREAD_STATE) ||         \
	((x) == ARM_THREAD_STATE32) ||        \
	((x) == ARM_THREAD_STATE64))
97
/*
 * TRUE for any flavor this architecture accepts from thread_set_state().
 * ARM_STATE_FLAVOR_IS_OTHER_VALID() extends the set per platform.
 * The parameter is fully parenthesized so that expression arguments
 * expand safely inside the comparisons.
 */
#define VALID_THREAD_STATE_FLAVOR(x)       \
	(((x) == ARM_THREAD_STATE) ||      \
	((x) == ARM_VFP_STATE) ||          \
	((x) == ARM_EXCEPTION_STATE) ||    \
	((x) == ARM_DEBUG_STATE) ||        \
	((x) == THREAD_STATE_NONE) ||      \
	((x) == ARM_THREAD_STATE32) ||     \
	((x) == ARM_THREAD_STATE64) ||     \
	((x) == ARM_EXCEPTION_STATE64) ||  \
	((x) == ARM_NEON_STATE) ||         \
	((x) == ARM_NEON_STATE64) ||       \
	((x) == ARM_DEBUG_STATE32) ||      \
	((x) == ARM_DEBUG_STATE64) ||      \
	((x) == ARM_PAGEIN_STATE) ||       \
	(ARM_STATE_FLAVOR_IS_OTHER_VALID(x)))
113
/*
 * Header common to the tagged state unions below: identifies which union
 * member is live ("flavor") and that member's size in 32-bit words ("count").
 */
struct arm_state_hdr {
	uint32_t flavor; /* e.g. ARM_THREAD_STATE32/64, ARM_SAVED_STATE32/64 */
	uint32_t count;  /* size of the selected state, in uint32_t words */
};
typedef struct arm_state_hdr arm_state_hdr_t;

/* Exported thread-state layouts (defined in <mach/machine/_structs.h>). */
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state_t;
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state32_t;
typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t;
123
#if !defined(KERNEL)
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__)

/*
 * Accessor macros for arm_thread_state64_t pointer fields.
 *
 * Userland code should use these rather than touching the pc/lr/sp/fp
 * fields directly; they forward to the __darwin_* implementations, which
 * handle any pointer-authentication representation of those fields.
 */

/* Return pc field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_pc(ts) \
	__darwin_arm_thread_state64_get_pc(ts)
/* Return pc field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_pc() macro in that case. */
#define arm_thread_state64_get_pc_fptr(ts) \
	__darwin_arm_thread_state64_get_pc_fptr(ts)
/* Set pc field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_pc_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_pc_fptr(ts, fptr)
/* Return lr field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_lr(ts) \
	__darwin_arm_thread_state64_get_lr(ts)
/* Return lr field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_lr() macro in that case. */
#define arm_thread_state64_get_lr_fptr(ts) \
	__darwin_arm_thread_state64_get_lr_fptr(ts)
/* Set lr field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_lr_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_lr_fptr(ts, fptr)
/* Return sp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_sp(ts) \
	__darwin_arm_thread_state64_get_sp(ts)
/* Set sp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_sp(ts, ptr) \
	__darwin_arm_thread_state64_set_sp(ts, ptr)
/* Return fp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_fp(ts) \
	__darwin_arm_thread_state64_get_fp(ts)
/* Set fp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_fp(ts, ptr) \
	__darwin_arm_thread_state64_set_fp(ts, ptr)
/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */
#define arm_thread_state64_ptrauth_strip(ts) \
	__darwin_arm_thread_state64_ptrauth_strip(ts)

#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */
#endif /* !defined(KERNEL) */
169
/*
 * Unified thread state: a flavor header followed by a union of the 32-bit
 * and 64-bit thread states.  Check ash.flavor before touching the union.
 */
struct arm_unified_thread_state {
	arm_state_hdr_t ash;
	union {
		arm_thread_state32_t ts_32;
		arm_thread_state64_t ts_64;
	} uts;
};
/* Shorthand for the union members.  NOTE: object-like macros — any use of
 * the bare tokens ts_32/ts_64 in including code is rewritten. */
#define ts_32 uts.ts_32
#define ts_64 uts.ts_64
typedef struct arm_unified_thread_state arm_unified_thread_state_t;

/* State sizes in 32-bit words, as reported via mach_msg_type_number_t. */
#define ARM_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state32_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state64_t)/sizeof(uint32_t)))
#define ARM_UNIFIED_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_unified_thread_state_t)/sizeof(uint32_t)))


typedef _STRUCT_ARM_VFP_STATE arm_vfp_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state32_t;
typedef _STRUCT_ARM_NEON_STATE64 arm_neon_state64_t;


typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state_t;
typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state32_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t;

typedef _STRUCT_ARM_DEBUG_STATE32 arm_debug_state32_t;
typedef _STRUCT_ARM_DEBUG_STATE64 arm_debug_state64_t;

typedef _STRUCT_ARM_PAGEIN_STATE arm_pagein_state_t;

#if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
/* See below for ARM64 kernel structure definition for arm_debug_state. */
#else /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
/*
 * Otherwise not ARM64 kernel and we must preserve legacy ARM definitions of
 * arm_debug_state for binary compatibility of userland consumers of this file.
 */
#if defined(__arm__)
typedef _STRUCT_ARM_DEBUG_STATE arm_debug_state_t;
#elif defined(__arm64__)
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t;
#else /* defined(__arm__) */
#error Undefined architecture
#endif /* defined(__arm__) */
#endif /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */

#define ARM_VFP_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfp_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state32_t)/sizeof(uint32_t)))

#define ARM_PAGEIN_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_pagein_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state64_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state64_t)/sizeof(uint32_t)))

/* Default flavor for this machine; the unified count covers both widths. */
#define MACHINE_THREAD_STATE ARM_THREAD_STATE
#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT


/*
 * Largest state on this machine:
 */
#define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX
257
258 #ifdef XNU_KERNEL_PRIVATE
259
/*
 * The HAS_* gates below enable the privileged saved-state mutators
 * (add/set pc, lr, reg, cpsr) only for the kernel configurations that
 * legitimately need to rewrite a trap frame.  Redundant re-definitions
 * (e.g. HAS_ADD_SAVED_STATE_PC under several configs) are identical and
 * therefore benign.
 */
#if CONFIG_DTRACE
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_LR 1
#define HAS_SET_SAVED_STATE_REG 1
#define HAS_MASK_SAVED_STATE_CPSR 1
#endif /* CONFIG_DTRACE */

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
#define HAS_SET_SAVED_STATE_CPSR 1
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

#if CONFIG_XNUPOST
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#endif /* CONFIG_XNUPOST */

#if DEBUG || DEVELOPMENT
#define HAS_ADD_SAVED_STATE_PC 1
#endif
280
281
282 static inline boolean_t
is_thread_state32(const arm_unified_thread_state_t * its)283 is_thread_state32(const arm_unified_thread_state_t *its)
284 {
285 return its->ash.flavor == ARM_THREAD_STATE32;
286 }
287
288 static inline boolean_t
is_thread_state64(const arm_unified_thread_state_t * its)289 is_thread_state64(const arm_unified_thread_state_t *its)
290 {
291 return its->ash.flavor == ARM_THREAD_STATE64;
292 }
293
294 static inline arm_thread_state32_t*
thread_state32(arm_unified_thread_state_t * its)295 thread_state32(arm_unified_thread_state_t *its)
296 {
297 return &its->ts_32;
298 }
299
300 static inline arm_thread_state64_t*
thread_state64(arm_unified_thread_state_t * its)301 thread_state64(arm_unified_thread_state_t *its)
302 {
303 return &its->ts_64;
304 }
305
306 static inline const arm_thread_state32_t*
const_thread_state32(const arm_unified_thread_state_t * its)307 const_thread_state32(const arm_unified_thread_state_t *its)
308 {
309 return &its->ts_32;
310 }
311
312 static inline const arm_thread_state64_t*
const_thread_state64(const arm_unified_thread_state_t * its)313 const_thread_state64(const arm_unified_thread_state_t *its)
314 {
315 return &its->ts_64;
316 }
317
#if defined(__arm64__)

#include <kern/assert.h>
#include <arm64/proc_reg.h>
/* Narrowing cast that first asserts the value round-trips through |type|
 * unchanged (comma expression: assert, then cast). */
#define CAST_ASSERT_SAFE(type, val) (assert((val) == ((type)(val))), (type)(val))
323
324 /*
325 * GPR context
326 */
327
/*
 * AArch32 GPR and exception context captured at exception entry.
 */
struct arm_saved_state32 {
	uint32_t r[13];     /* General purpose register r0-r12 */
	uint32_t sp;        /* Stack pointer r13 */
	uint32_t lr;        /* Link register r14 */
	uint32_t pc;        /* Program counter r15 */
	uint32_t cpsr;      /* Current program status register */
	uint32_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
};
typedef struct arm_saved_state32 arm_saved_state32_t;

/* arm_saved_state32 prefixed with a flavor tag. */
struct arm_saved_state32_tagged {
	uint32_t tag;
	struct arm_saved_state32 state;
};
typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t;

/* Size of arm_saved_state32 in 32-bit words. */
#define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state32_t)/sizeof(unsigned int)))

/*
 * AArch64 GPR and exception context captured at exception entry.
 */
struct arm_saved_state64 {
	uint64_t x[29];     /* General purpose registers x0-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	uint64_t pc;        /* Program counter */
	uint32_t cpsr;      /* Current program status register */
	uint32_t reserved;  /* Reserved padding */
	uint64_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
#if HAS_APPLE_PAC
	/* Integrity hash maintained by ml_sign_thread_state() and verified by
	 * ml_check_signed_state(); covers pc/cpsr/lr/x16/x17 (see below). */
	uint64_t jophash;
#endif /* HAS_APPLE_PAC */
};
typedef struct arm_saved_state64 arm_saved_state64_t;

/* Size of arm_saved_state64 in 32-bit words. */
#define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state64_t)/sizeof(unsigned int)))

/* arm_saved_state64 prefixed with a flavor tag. */
struct arm_saved_state64_tagged {
	uint32_t tag;
	struct arm_saved_state64 state;
};
typedef struct arm_saved_state64_tagged arm_saved_state64_tagged_t;

/*
 * Kernel-internal saved state: flavor header plus a union of the 32- and
 * 64-bit contexts.  Check ash.flavor (is_saved_state32/64) before use.
 */
struct arm_saved_state {
	arm_state_hdr_t ash;
	union {
		struct arm_saved_state32 ss_32;
		struct arm_saved_state64 ss_64;
	} uss;
} __attribute__((aligned(16)));
/* Shorthand for the union members (object-like macros). */
#define ss_32 uss.ss_32
#define ss_64 uss.ss_64

typedef struct arm_saved_state arm_saved_state_t;

/*
 * Reduced context for kernel-to-kernel switches: only the callee-saved
 * GPRs plus a few per-thread flags are preserved.
 */
struct arm_kernel_saved_state {
	uint64_t x[10]; /* General purpose registers x19-x28 */
	uint64_t fp;    /* Frame pointer x29 */
	uint64_t lr;    /* Link register x30 */
	uint64_t sp;    /* Stack pointer x31 */
	/* Some things here we DO need to preserve */
	uint8_t pc_was_in_userspace;
	uint8_t ssbs;
	uint8_t dit;
	uint8_t uao;
} __attribute__((aligned(16)));

typedef struct arm_kernel_saved_state arm_kernel_saved_state_t;
400
401 extern void ml_panic_on_invalid_old_cpsr(const arm_saved_state_t *) __attribute__((noreturn));
402
403 extern void ml_panic_on_invalid_new_cpsr(const arm_saved_state_t *, uint32_t) __attribute__((noreturn));
404
405 #if HAS_APPLE_PAC
406
407 #include <sys/cdefs.h>
408
409 /*
410 * Used by MANIPULATE_SIGNED_THREAD_STATE(), potentially from C++ (IOKit) code.
411 * Open-coded to prevent a circular dependency between mach/arm/thread_status.h
412 * and osfmk/arm/machine_routines.h.
413 */
414 __BEGIN_DECLS
415 extern uint64_t ml_pac_safe_interrupts_disable(void);
416 extern void ml_pac_safe_interrupts_restore(uint64_t);
417 __END_DECLS
418
/*
 * Methods used to sign and check thread state to detect corruptions of saved
 * thread state across exceptions and context switches.
 */
extern void ml_sign_thread_state(arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);

extern void ml_check_signed_state(const arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);

/* XXX: including stddef.h here breaks ctfmerge on some builds, so use __builtin_offsetof() instead of offsetof() */
/* Byte offset of field |x| within the 64-bit saved state, for asm operands. */
#define ss64_offsetof(x) __builtin_offsetof(struct arm_saved_state, ss_64.x)
429
/**
 * Verify the signed thread state in _iss, execute the assembly instructions
 * _instr, and re-sign the modified thread state. Varargs specify additional
 * inputs.
 *
 * Interrupts are disabled for the whole verify/modify/re-sign sequence so
 * the signed fields cannot be observed in a torn state.  lr is stashed in
 * x9 around the two bl calls, and the kernel SP (SPSel #1) is selected
 * while the state is unsigned.
 *
 * _instr may read or modify the thread state in the following registers:
 *
 * x0: _iss
 * x1: authed _iss->ss_64.pc
 * w2: authed _iss->ss_64.cpsr
 * x3: authed _iss->ss_64.lr
 * x4: authed _iss->ss_64.x16
 * x5: authed _iss->ss_64.x17
 * x6: scratch register
 * x7: scratch register
 *
 * If _instr makes no changes to the thread state, it may skip re-signing by
 * branching to label 0.
 */
#define MANIPULATE_SIGNED_THREAD_STATE(_iss, _instr, ...) \
	do { \
		uint64_t _intr = ml_pac_safe_interrupts_disable(); \
		asm volatile ( \
	            "mov x9, lr" "\n" \
	            "mov x0, %[iss]" "\n" \
	            "msr SPSel, #1" "\n" \
	            "ldp x4, x5, [x0, %[SS64_X16]]" "\n" \
	            "ldr x7, [x0, %[SS64_PC]]" "\n" \
	            "ldr w8, [x0, %[SS64_CPSR]]" "\n" \
	            "ldr x3, [x0, %[SS64_LR]]" "\n" \
	            "mov x1, x7" "\n" \
	            "mov w2, w8" "\n" \
	            "bl _ml_check_signed_state" "\n" \
	            "mov x1, x7" "\n" \
	            "mov w2, w8" "\n" \
	            _instr "\n" \
	            "bl _ml_sign_thread_state" "\n" \
	            "0:" "\n" \
	            "msr SPSel, #0" "\n" \
	            "mov lr, x9" "\n" \
	            : \
	            : [iss] "r"(_iss), \
	            [SS64_X16] "i"(ss64_offsetof(x[16])), \
	            [SS64_PC] "i"(ss64_offsetof(pc)), \
	            [SS64_CPSR] "i"(ss64_offsetof(cpsr)), \
	            [SS64_LR] "i"(ss64_offsetof(lr)),##__VA_ARGS__ \
	            : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", \
	            "x9" \
	            ); \
		ml_pac_safe_interrupts_restore(_intr); \
	} while (0)

/*
 * Asm fragment that branches to _ml_panic_on_invalid_old_cpsr unless the
 * authenticated CPSR in w2 selects EL0; prepended to _instr by
 * MANIPULATE_SIGNED_USER_THREAD_STATE() below.
 */
#define VERIFY_USER_THREAD_STATE_INSTR \
	        "and w6, w2, %[CPSR_EL_MASK]" "\n" \
	        "cmp w6, %[CPSR_EL0]" "\n" \
	        "b.eq 1f" "\n" \
	        "b _ml_panic_on_invalid_old_cpsr" "\n" \
	        "1:" "\n"

/* Extra asm inputs consumed by VERIFY_USER_THREAD_STATE_INSTR. */
#define VERIFY_USER_THREAD_STATE_INPUTS \
	        [CPSR_EL_MASK] "i"(PSR64_MODE_EL_MASK), \
	        [CPSR_EL0] "i"(PSR64_MODE_EL0)

/* As MANIPULATE_SIGNED_THREAD_STATE(), but panics if the saved CPSR is not
 * a user (EL0) CPSR. */
#define MANIPULATE_SIGNED_USER_THREAD_STATE(_iss, _instr, ...) \
	MANIPULATE_SIGNED_THREAD_STATE(_iss, \
	        VERIFY_USER_THREAD_STATE_INSTR \
	        _instr, \
	        VERIFY_USER_THREAD_STATE_INPUTS, ##__VA_ARGS__)

/* Verify |src|'s signature (panicking if it is not valid user state), then
 * sign the copy at |dst| in place. */
static inline void
check_and_sign_copied_user_thread_state(arm_saved_state_t *dst, const arm_saved_state_t *src)
{
	MANIPULATE_SIGNED_USER_THREAD_STATE(src,
	    "mov x0, %[dst]",
	    [dst] "r"(dst)
	    );
}
#endif /* HAS_APPLE_PAC */
507 #endif /* HAS_APPLE_PAC */
508
509 static inline boolean_t
is_saved_state32(const arm_saved_state_t * iss)510 is_saved_state32(const arm_saved_state_t *iss)
511 {
512 return iss->ash.flavor == ARM_SAVED_STATE32;
513 }
514
515 static inline boolean_t
is_saved_state64(const arm_saved_state_t * iss)516 is_saved_state64(const arm_saved_state_t *iss)
517 {
518 return iss->ash.flavor == ARM_SAVED_STATE64;
519 }
520
521 static inline arm_saved_state32_t*
saved_state32(arm_saved_state_t * iss)522 saved_state32(arm_saved_state_t *iss)
523 {
524 return &iss->ss_32;
525 }
526
527 static inline const arm_saved_state32_t*
const_saved_state32(const arm_saved_state_t * iss)528 const_saved_state32(const arm_saved_state_t *iss)
529 {
530 return &iss->ss_32;
531 }
532
533 static inline arm_saved_state64_t*
saved_state64(arm_saved_state_t * iss)534 saved_state64(arm_saved_state_t *iss)
535 {
536 return &iss->ss_64;
537 }
538
539 static inline const arm_saved_state64_t*
const_saved_state64(const arm_saved_state_t * iss)540 const_saved_state64(const arm_saved_state_t *iss)
541 {
542 return &iss->ss_64;
543 }
544
545 static inline register_t
get_saved_state_pc(const arm_saved_state_t * iss)546 get_saved_state_pc(const arm_saved_state_t *iss)
547 {
548 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc);
549 }
550
#if HAS_ADD_SAVED_STATE_PC
/*
 * Advance the saved pc by |diff| (signed).  On PAC hardware the 64-bit path
 * goes through MANIPULATE_SIGNED_THREAD_STATE() so the state is verified
 * first and re-signed afterwards.
 */
static inline void
add_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov w6, %w[diff] \n"
		    "add x1, x1, w6, sxtw \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_ADD_SAVED_STATE_PC */

/*
 * As add_saved_state_pc(), but panics (via the user-state verify fragment)
 * if the saved state is not user (EL0) state.
 */
static inline void
add_user_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov w6, %w[diff] \n"
		    "add x1, x1, w6, sxtw \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}

#if HAS_SET_SAVED_STATE_PC
/* Overwrite the saved pc, re-signing the state on PAC hardware. */
static inline void
set_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov x1, %[pc] \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_PC */

/* As set_saved_state_pc(), but panics if the state is not user state. */
static inline void
set_user_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov x1, %[pc] \n"
		    "str x1, [x0, %[SS64_PC]] \n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
630
631 static inline register_t
get_saved_state_sp(const arm_saved_state_t * iss)632 get_saved_state_sp(const arm_saved_state_t *iss)
633 {
634 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp);
635 }
636
637 static inline void
set_saved_state_sp(arm_saved_state_t * iss,register_t sp)638 set_saved_state_sp(arm_saved_state_t *iss, register_t sp)
639 {
640 if (is_saved_state32(iss)) {
641 saved_state32(iss)->sp = CAST_ASSERT_SAFE(uint32_t, sp);
642 } else {
643 saved_state64(iss)->sp = (uint64_t)sp;
644 }
645 }
646
647 static inline register_t
get_saved_state_lr(const arm_saved_state_t * iss)648 get_saved_state_lr(const arm_saved_state_t *iss)
649 {
650 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr);
651 }
652
#if HAS_SET_SAVED_STATE_LR
/* Overwrite the saved lr, re-signing the state on PAC hardware. */
static inline void
set_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov x3, %[lr] \n"
		    "str x3, [x0, %[SS64_LR]] \n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_LR */

/* As set_saved_state_lr(), but panics if the state is not user state. */
static inline void
set_user_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov x3, %[lr] \n"
		    "str x3, [x0, %[SS64_LR]] \n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
690
691 static inline register_t
get_saved_state_fp(const arm_saved_state_t * iss)692 get_saved_state_fp(const arm_saved_state_t *iss)
693 {
694 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp);
695 }
696
697 static inline void
set_saved_state_fp(arm_saved_state_t * iss,register_t fp)698 set_saved_state_fp(arm_saved_state_t *iss, register_t fp)
699 {
700 if (is_saved_state32(iss)) {
701 saved_state32(iss)->r[7] = CAST_ASSERT_SAFE(uint32_t, fp);
702 } else {
703 saved_state64(iss)->fp = (uint64_t)fp;
704 }
705 }
706
/*
 * Bounds check for a GPR index into the saved state.
 * NOTE(review): the limits used are ARM_SAVED_STATE{32,64}_COUNT — the size
 * of the whole saved-state structure in 32-bit words — not the number of
 * GPRs (13 / 29).  The bound is therefore looser than r[]/x[]; confirm
 * whether callers depend on that before tightening.
 */
static inline int
check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg)
{
	return is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT);
}
712
713 static inline register_t
get_saved_state_reg(const arm_saved_state_t * iss,unsigned reg)714 get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg)
715 {
716 if (!check_saved_state_reglimit(iss, reg)) {
717 return 0;
718 }
719
720 return (register_t)(is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]));
721 }
722
#if HAS_SET_SAVED_STATE_REG
/*
 * Write GPR |reg| in the saved state; silently ignored if |reg| is out of
 * range.  x16/x17 are covered by the PAC signature, so on PAC hardware they
 * must be written through MANIPULATE_SIGNED_THREAD_STATE().
 */
static inline void
set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov x4, %[value] \n"
			    "str x4, [x0, %[SS64_X16]] \n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov x5, %[value] \n"
			    "str x5, [x0, %[SS64_X17]] \n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif /* HAS_APPLE_PAC */
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
#endif /* HAS_SET_SAVED_STATE_REG */

/* As set_saved_state_reg(), but panics if the state is not user state. */
static inline void
set_user_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov x4, %[value] \n"
			    "str x4, [x0, %[SS64_X16]] \n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov x5, %[value] \n"
			    "str x5, [x0, %[SS64_X17]] \n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif /* HAS_APPLE_PAC */
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
790
791
792 static inline uint32_t
get_saved_state_cpsr(const arm_saved_state_t * iss)793 get_saved_state_cpsr(const arm_saved_state_t *iss)
794 {
795 return is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr;
796 }
797
#if HAS_MASK_SAVED_STATE_CPSR
/*
 * Apply set_bits then clear clear_bits in the saved CPSR.  The CPSR is part
 * of the PAC signature, so on PAC hardware the 64-bit path runs inside
 * MANIPULATE_SIGNED_THREAD_STATE().
 */
static inline void
mask_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr |= set_bits;
		saved_state32(iss)->cpsr &= ~clear_bits;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov w6, %w[set_bits] \n"
		    "orr w2, w2, w6, lsl #0 \n"
		    "mov w6, %w[clear_bits] \n"
		    "bic w2, w2, w6, lsl #0 \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		saved_state64(iss)->cpsr |= set_bits;
		saved_state64(iss)->cpsr &= ~clear_bits;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_MASK_SAVED_STATE_CPSR */

/*
 * As mask_saved_state_cpsr(), but panics (ml_panic_on_invalid_new_cpsr)
 * if the resulting CPSR would not be a user (EL0) CPSR.
 */
static inline void
mask_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		uint32_t new_cpsr = saved_state32(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state32(iss)->cpsr = new_cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov w6, %w[set_bits] \n"
		    "orr w2, w2, w6, lsl #0 \n"
		    "mov w6, %w[clear_bits] \n"
		    "bic w2, w2, w6, lsl #0 \n"
		    "and w6, w2, %[CPSR_EL_MASK] \n"
		    "cmp w6, %[CPSR_EL0] \n"
		    "b.eq 1f \n"
		    "mov w1, w2 \n"
		    "b _ml_panic_on_invalid_new_cpsr \n"
		    "1: \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		uint32_t new_cpsr = saved_state64(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR64_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state64(iss)->cpsr = new_cpsr;
#endif /* HAS_APPLE_PAC */
	}
}

#if HAS_SET_SAVED_STATE_CPSR
/* Overwrite the saved CPSR, re-signing the state on PAC hardware. */
static inline void
set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov w2, %w[cpsr] \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [cpsr] "r"(cpsr)
		    );
#else
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_CPSR */

/* As set_saved_state_cpsr(), but panics if |cpsr| is not a user CPSR. */
static inline void
set_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		if (!PSR_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov w2, %w[cpsr] \n"
		    "and w6, w2, %[CPSR_EL_MASK] \n"
		    "cmp w6, %[CPSR_EL0] \n"
		    "b.eq 1f \n"
		    "mov w1, w2 \n"
		    "b _ml_panic_on_invalid_new_cpsr \n"
		    "1: \n"
		    "str w2, [x0, %[SS64_CPSR]] \n",
		    [cpsr] "r"(cpsr)
		    );
#else
		if (!PSR64_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
913
914 static inline register_t
get_saved_state_far(const arm_saved_state_t * iss)915 get_saved_state_far(const arm_saved_state_t *iss)
916 {
917 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far);
918 }
919
920 static inline void
set_saved_state_far(arm_saved_state_t * iss,register_t far)921 set_saved_state_far(arm_saved_state_t *iss, register_t far)
922 {
923 if (is_saved_state32(iss)) {
924 saved_state32(iss)->far = CAST_ASSERT_SAFE(uint32_t, far);
925 } else {
926 saved_state64(iss)->far = (uint64_t)far;
927 }
928 }
929
930 static inline uint32_t
get_saved_state_esr(const arm_saved_state_t * iss)931 get_saved_state_esr(const arm_saved_state_t *iss)
932 {
933 return is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr;
934 }
935
936 static inline void
set_saved_state_esr(arm_saved_state_t * iss,uint32_t esr)937 set_saved_state_esr(arm_saved_state_t *iss, uint32_t esr)
938 {
939 if (is_saved_state32(iss)) {
940 saved_state32(iss)->esr = esr;
941 } else {
942 saved_state64(iss)->esr = esr;
943 }
944 }
945
946 static inline uint32_t
get_saved_state_exc(const arm_saved_state_t * iss)947 get_saved_state_exc(const arm_saved_state_t *iss)
948 {
949 return is_saved_state32(iss) ? const_saved_state32(iss)->exception : const_saved_state64(iss)->exception;
950 }
951
952 static inline void
set_saved_state_exc(arm_saved_state_t * iss,uint32_t exc)953 set_saved_state_exc(arm_saved_state_t *iss, uint32_t exc)
954 {
955 if (is_saved_state32(iss)) {
956 saved_state32(iss)->exception = exc;
957 } else {
958 saved_state64(iss)->exception = exc;
959 }
960 }
961
/* Panics; used for code paths that are not implemented on this configuration. */
extern void panic_unimplemented(void);
963
964 /**
965 * Extracts the SVC (Supervisor Call) number from the appropriate GPR (General
966 * Purpose Register).
967 *
968 * @param[in] iss the 32-bit or 64-bit ARM saved state (i.e. trap frame).
969 *
970 * @return The SVC number.
971 */
972 static inline int
get_saved_state_svc_number(const arm_saved_state_t * iss)973 get_saved_state_svc_number(const arm_saved_state_t *iss)
974 {
975 return is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]; /* Only first word counts here */
976 }
977
/* Pre-armv8 debug register layout, kept for the ARM_DEBUG_STATE flavor. */
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t;

/*
 * Kernel-internal aggregate of per-thread debug register state: a state
 * header plus a union of the 32-bit and 64-bit layouts, and a reference
 * count (os_refcnt_t from <os/refcnt.h>) for shared ownership.
 */
struct arm_debug_aggregate_state {
	arm_state_hdr_t dsh;   /* state header; presumably its flavor selects ds32 vs ds64 -- TODO confirm */
	union {
		arm_debug_state32_t ds32;   /* AArch32 debug registers */
		arm_debug_state64_t ds64;   /* AArch64 debug registers */
	} uds;
	os_refcnt_t ref;       /* reference count on this aggregate */
} __attribute__((aligned(16)));

typedef struct arm_debug_aggregate_state arm_debug_state_t;

/* Size of the legacy debug state in 32-bit words (Mach state-count convention). */
#define ARM_LEGACY_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_legacy_debug_state_t)/sizeof(uint32_t)))
993
994 /*
995 * NEON context
996 */
997 typedef __uint128_t uint128_t;
998 typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2)));
999 typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4)));
1000
/*
 * AArch32 NEON/VFP register file: 16 Q registers overlaid with the
 * D and S register views, plus floating-point status/control words.
 */
struct arm_neon_saved_state32 {
	union {
		uint128_t q[16];   /* q0-q15: 128-bit view */
		uint64_t d[32];    /* d0-d31: 64-bit view */
		uint32_t s[32];    /* s0-s31: 32-bit view (aliases the lower D registers) */
	} v;
	uint32_t fpsr;   /* floating-point status register */
	uint32_t fpcr;   /* floating-point control register */
};
typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t;

/* State size in 32-bit words, per Mach state-count convention. */
#define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int)))
1014
/*
 * AArch64 SIMD&FP register file: 32 full-width V registers viewable as
 * 128-bit scalars or as vector types, plus status/control words.
 */
struct arm_neon_saved_state64 {
	union {
		uint128_t q[32];    /* v0-v31 as 128-bit scalars */
		uint64x2_t d[32];   /* v0-v31 as 2 x 64-bit lanes */
		uint32x4_t s[32];   /* v0-v31 as 4 x 32-bit lanes */
	} v;
	uint32_t fpsr;   /* floating-point status register */
	uint32_t fpcr;   /* floating-point control register */
};
typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t;

/* State size in 32-bit words, per Mach state-count convention. */
#define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int)))
1028
/*
 * Flavor-tagged NEON state: nsh.flavor selects which union member is
 * valid (see is_neon_saved_state32/64 below).
 */
struct arm_neon_saved_state {
	arm_state_hdr_t nsh;   /* state header: flavor + count */
	union {
		struct arm_neon_saved_state32 ns_32;
		struct arm_neon_saved_state64 ns_64;
	} uns;
};
typedef struct arm_neon_saved_state arm_neon_saved_state_t;
/* Shorthand so callers can write state->ns_32 / state->ns_64 directly. */
#define ns_32 uns.ns_32
#define ns_64 uns.ns_64

/*
 * Reduced NEON state kept for kernel threads: eight D registers
 * (presumably the callee-saved d8-d15 -- TODO confirm) and FPCR only.
 */
struct arm_kernel_neon_saved_state {
	uint64_t d[8];
	uint32_t fpcr;
};
typedef struct arm_kernel_neon_saved_state arm_kernel_neon_saved_state_t;
1045
1046 static inline boolean_t
is_neon_saved_state32(const arm_neon_saved_state_t * state)1047 is_neon_saved_state32(const arm_neon_saved_state_t *state)
1048 {
1049 return state->nsh.flavor == ARM_NEON_SAVED_STATE32;
1050 }
1051
1052 static inline boolean_t
is_neon_saved_state64(const arm_neon_saved_state_t * state)1053 is_neon_saved_state64(const arm_neon_saved_state_t *state)
1054 {
1055 return state->nsh.flavor == ARM_NEON_SAVED_STATE64;
1056 }
1057
1058 static inline arm_neon_saved_state32_t *
neon_state32(arm_neon_saved_state_t * state)1059 neon_state32(arm_neon_saved_state_t *state)
1060 {
1061 return &state->ns_32;
1062 }
1063
1064 static inline arm_neon_saved_state64_t *
neon_state64(arm_neon_saved_state_t * state)1065 neon_state64(arm_neon_saved_state_t *state)
1066 {
1067 return &state->ns_64;
1068 }
1069
1070
1071
1072 /*
1073 * Aggregated context
1074 */
1075
1076 struct arm_context {
1077 struct arm_saved_state ss;
1078 struct arm_neon_saved_state ns;
1079 };
1080 typedef struct arm_context arm_context_t;
1081
1082 struct arm_kernel_context {
1083 struct arm_kernel_saved_state ss;
1084 struct arm_kernel_neon_saved_state ns;
1085 };
1086 typedef struct arm_kernel_context arm_kernel_context_t;
1087
/* Converters between the in-kernel trap frame and the Mach 64-bit thread state. */
extern void saved_state_to_thread_state64(const arm_saved_state_t*, arm_thread_state64_t*);
extern void thread_state64_to_saved_state(const arm_thread_state64_t*, arm_saved_state_t*);
1090
1091 #else /* defined(__arm64__) */
1092 #error Unknown arch
1093 #endif /* defined(__arm64__) */
1094
/* Converters between the in-kernel trap frame and the Mach 32-bit thread state. */
extern void saved_state_to_thread_state32(const arm_saved_state_t*, arm_thread_state32_t*);
extern void thread_state32_to_saved_state(const arm_thread_state32_t*, arm_saved_state_t*);
1097
1098 #endif /* XNU_KERNEL_PRIVATE */
1099
1100 #endif /* defined (__arm__) || defined (__arm64__) */
1101
1102 #endif /* _ARM_THREAD_STATUS_H_ */
1103