1 /*
2 * Copyright (c) 2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * FILE_ID: thread_status.h
30 */
31
32
33 #ifndef _ARM_THREAD_STATUS_H_
34 #define _ARM_THREAD_STATUS_H_
35
36 #if defined (__arm__) || defined (__arm64__)
37
38 #include <mach/machine/_structs.h>
39 #include <mach/machine/thread_state.h>
40 #include <mach/message.h>
41 #include <mach/vm_types.h>
42
43 #ifdef XNU_KERNEL_PRIVATE
44 #include <os/refcnt.h>
45 #endif
46
47 /*
48 * Support for determining the state of a thread
49 */
50
51
52 /*
53 * Flavors
54 */
55
/* Thread-state flavor identifiers passed to thread_get_state()/thread_set_state(). */
#define ARM_THREAD_STATE 1

/* ARM_THREAD_STATE is the legacy 32-bit layout; the unified flavor shares its number. */
#define ARM_UNIFIED_THREAD_STATE ARM_THREAD_STATE
#define ARM_VFP_STATE 2
#define ARM_EXCEPTION_STATE 3
#define ARM_DEBUG_STATE 4 /* pre-armv8 */
#define THREAD_STATE_NONE 5
#define ARM_THREAD_STATE64 6
#define ARM_EXCEPTION_STATE64 7
// ARM_THREAD_STATE_LAST 8 /* legacy */
#define ARM_THREAD_STATE32 9

#ifdef XNU_KERNEL_PRIVATE
#define X86_THREAD_STATE_NONE 13 /* i386/thread_status.h THREAD_STATE_NONE */
#endif /* XNU_KERNEL_PRIVATE */

/* API */
#define ARM_DEBUG_STATE32 14
#define ARM_DEBUG_STATE64 15
#define ARM_NEON_STATE 16
#define ARM_NEON_STATE64 17
#define ARM_CPMU_STATE64 18

#ifdef XNU_KERNEL_PRIVATE
/* For kernel use */
#define ARM_SAVED_STATE32 20
#define ARM_SAVED_STATE64 21
#define ARM_NEON_SAVED_STATE32 22
#define ARM_NEON_SAVED_STATE64 23
#endif /* XNU_KERNEL_PRIVATE */

#define ARM_PAGEIN_STATE 27


#define THREAD_STATE_FLAVORS 29 /* This must be updated to 1 more than the highest numerical state flavor */

/* Platforms may define extra valid flavors; default to "none". */
#ifndef ARM_STATE_FLAVOR_IS_OTHER_VALID
#define ARM_STATE_FLAVOR_IS_OTHER_VALID(_flavor_) 0
#endif
94
/*
 * True when setting state of flavor x rewrites the target thread's core CPU
 * registers (pc/sp/lr/GPRs). Parameter is parenthesized so complex
 * expressions (e.g. a ternary) expand correctly.
 */
#define FLAVOR_MODIFIES_CORE_CPU_REGISTERS(x) \
	(((x) == ARM_THREAD_STATE) ||         \
	((x) == ARM_THREAD_STATE32) ||        \
	((x) == ARM_THREAD_STATE64))
99
/*
 * True when x names a flavor this platform accepts from user callers.
 * Parameter is parenthesized so complex expressions expand correctly.
 */
#define VALID_THREAD_STATE_FLAVOR(x) \
	(((x) == ARM_THREAD_STATE) ||           \
	((x) == ARM_VFP_STATE) ||               \
	((x) == ARM_EXCEPTION_STATE) ||         \
	((x) == ARM_DEBUG_STATE) ||             \
	((x) == THREAD_STATE_NONE) ||           \
	((x) == ARM_THREAD_STATE32) ||          \
	((x) == ARM_THREAD_STATE64) ||          \
	((x) == ARM_EXCEPTION_STATE64) ||       \
	((x) == ARM_NEON_STATE) ||              \
	((x) == ARM_NEON_STATE64) ||            \
	((x) == ARM_DEBUG_STATE32) ||           \
	((x) == ARM_DEBUG_STATE64) ||           \
	((x) == ARM_PAGEIN_STATE) ||            \
	(ARM_STATE_FLAVOR_IS_OTHER_VALID(x)))
115
/* Common header placed in front of flavored/united state payloads. */
struct arm_state_hdr {
	uint32_t flavor;        /* one of the ARM_*_STATE flavor numbers above */
	uint32_t count;         /* payload size, in 32-bit words */
};
typedef struct arm_state_hdr arm_state_hdr_t;
121
/*
 * ARM_THREAD_STATE is the 32-bit register layout, so arm_thread_state_t and
 * arm_thread_state32_t are the same structure.
 */
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state_t;
typedef _STRUCT_ARM_THREAD_STATE arm_thread_state32_t;
typedef _STRUCT_ARM_THREAD_STATE64 arm_thread_state64_t;
125
#if !defined(KERNEL)
#if __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__)

/*
 * Accessor macros for arm_thread_state64_t pointer fields.
 *
 * Userspace-only wrappers around the __darwin_* implementations; on
 * pointer-authentication targets these handle signing/stripping so callers
 * never touch raw signed register values directly.
 */

/* Return pc field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_pc(ts) \
	__darwin_arm_thread_state64_get_pc(ts)
/* Return pc field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_pc() macro in that case. */
#define arm_thread_state64_get_pc_fptr(ts) \
	__darwin_arm_thread_state64_get_pc_fptr(ts)
/* Set pc field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_pc_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_pc_fptr(ts, fptr)
/* Return lr field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_lr(ts) \
	__darwin_arm_thread_state64_get_lr(ts)
/* Return lr field of arm_thread_state64_t as a function pointer. May return
 * NULL if a valid function pointer cannot be constructed, the caller should
 * fall back to the arm_thread_state64_get_lr() macro in that case. */
#define arm_thread_state64_get_lr_fptr(ts) \
	__darwin_arm_thread_state64_get_lr_fptr(ts)
/* Set lr field of arm_thread_state64_t to a function pointer */
#define arm_thread_state64_set_lr_fptr(ts, fptr) \
	__darwin_arm_thread_state64_set_lr_fptr(ts, fptr)
/* Return sp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_sp(ts) \
	__darwin_arm_thread_state64_get_sp(ts)
/* Set sp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_sp(ts, ptr) \
	__darwin_arm_thread_state64_set_sp(ts, ptr)
/* Return fp field of arm_thread_state64_t as a data pointer value */
#define arm_thread_state64_get_fp(ts) \
	__darwin_arm_thread_state64_get_fp(ts)
/* Set fp field of arm_thread_state64_t to a data pointer value */
#define arm_thread_state64_set_fp(ts, ptr) \
	__darwin_arm_thread_state64_set_fp(ts, ptr)
/* Strip ptr auth bits from pc, lr, sp and fp field of arm_thread_state64_t */
#define arm_thread_state64_ptrauth_strip(ts) \
	__darwin_arm_thread_state64_ptrauth_strip(ts)

#endif /* __DARWIN_C_LEVEL >= __DARWIN_C_FULL && defined(__arm64__) */
#endif /* !defined(KERNEL) */
171
/*
 * Width-agnostic thread state: a header identifying the flavor followed by
 * either the 32-bit or 64-bit register payload.
 */
struct arm_unified_thread_state {
	arm_state_hdr_t ash;    /* ash.flavor selects which union member is live */
	union {
		arm_thread_state32_t ts_32;
		arm_thread_state64_t ts_64;
	} uts;
};
/* Convenience accessors so callers can write state->ts_32 / state->ts_64. */
#define ts_32 uts.ts_32
#define ts_64 uts.ts_64
typedef struct arm_unified_thread_state arm_unified_thread_state_t;
182
/* State sizes in 32-bit words, as reported to/expected from thread_get_state(). */
#define ARM_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state32_t)/sizeof(uint32_t)))
#define ARM_THREAD_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_thread_state64_t)/sizeof(uint32_t)))
#define ARM_UNIFIED_THREAD_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_unified_thread_state_t)/sizeof(uint32_t)))
191
192
/* Floating-point / SIMD register state. */
typedef _STRUCT_ARM_VFP_STATE arm_vfp_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state_t;
typedef _STRUCT_ARM_NEON_STATE arm_neon_state32_t;
typedef _STRUCT_ARM_NEON_STATE64 arm_neon_state64_t;


/* Exception state (fault address, syndrome, exception number). */
typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state_t;
typedef _STRUCT_ARM_EXCEPTION_STATE arm_exception_state32_t;
typedef _STRUCT_ARM_EXCEPTION_STATE64 arm_exception_state64_t;

/* Hardware breakpoint/watchpoint register state. */
typedef _STRUCT_ARM_DEBUG_STATE32 arm_debug_state32_t;
typedef _STRUCT_ARM_DEBUG_STATE64 arm_debug_state64_t;

/* Page-in (fault diagnostics) state. */
typedef _STRUCT_ARM_PAGEIN_STATE arm_pagein_state_t;
207
#if defined(XNU_KERNEL_PRIVATE) && defined(__arm64__)
/* See below for ARM64 kernel structure definition for arm_debug_state. */
#else /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
/*
 * Otherwise not ARM64 kernel and we must preserve legacy ARM definitions of
 * arm_debug_state for binary compatibility of userland consumers of this file.
 */
#if defined(__arm__)
typedef _STRUCT_ARM_DEBUG_STATE arm_debug_state_t;
#elif defined(__arm64__)
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_debug_state_t;
#else /* defined(__arm__) */
#error Undefined architecture
#endif /* defined(__arm__) */
#endif /* defined(XNU_KERNEL_PRIVATE) && defined(__arm64__) */
223
/* Per-flavor state sizes in 32-bit words. */
#define ARM_VFP_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_vfp_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state_t)/sizeof(uint32_t)))

#define ARM_EXCEPTION_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_exception_state64_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state32_t)/sizeof(uint32_t)))

#define ARM_PAGEIN_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_pagein_state_t)/sizeof(uint32_t)))

#define ARM_DEBUG_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_debug_state64_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state_t)/sizeof(uint32_t)))

#define ARM_NEON_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_state64_t)/sizeof(uint32_t)))

/* Default flavor for this machine; the count covers the unified layout. */
#define MACHINE_THREAD_STATE ARM_THREAD_STATE
#define MACHINE_THREAD_STATE_COUNT ARM_UNIFIED_THREAD_STATE_COUNT
253
254
255 /*
256 * Largest state on this machine:
257 */
258 #define THREAD_MACHINE_STATE_MAX THREAD_STATE_MAX
259
260 #ifdef XNU_KERNEL_PRIVATE
261
/*
 * The privileged saved-state mutators below are compiled in only for the
 * subsystems that need them; each HAS_* flag gates one accessor.
 */
#if CONFIG_DTRACE
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_LR 1
#define HAS_SET_SAVED_STATE_REG 1
#define HAS_MASK_SAVED_STATE_CPSR 1
#endif /* CONFIG_DTRACE */

#if CONFIG_KDP_INTERACTIVE_DEBUGGING
#define HAS_SET_SAVED_STATE_CPSR 1
#endif /* CONFIG_KDP_INTERACTIVE_DEBUGGING */

#if CONFIG_XNUPOST
#define HAS_ADD_SAVED_STATE_PC 1
#define HAS_SET_SAVED_STATE_PC 1
#endif /* CONFIG_XNUPOST */

#if DEBUG || DEVELOPMENT
#define HAS_ADD_SAVED_STATE_PC 1
#endif /* DEBUG || DEVELOPMENT */
282
283
284 static inline boolean_t
is_thread_state32(const arm_unified_thread_state_t * its)285 is_thread_state32(const arm_unified_thread_state_t *its)
286 {
287 return its->ash.flavor == ARM_THREAD_STATE32;
288 }
289
290 static inline boolean_t
is_thread_state64(const arm_unified_thread_state_t * its)291 is_thread_state64(const arm_unified_thread_state_t *its)
292 {
293 return its->ash.flavor == ARM_THREAD_STATE64;
294 }
295
296 static inline arm_thread_state32_t*
thread_state32(arm_unified_thread_state_t * its)297 thread_state32(arm_unified_thread_state_t *its)
298 {
299 return &its->ts_32;
300 }
301
302 static inline arm_thread_state64_t*
thread_state64(arm_unified_thread_state_t * its)303 thread_state64(arm_unified_thread_state_t *its)
304 {
305 return &its->ts_64;
306 }
307
308 static inline const arm_thread_state32_t*
const_thread_state32(const arm_unified_thread_state_t * its)309 const_thread_state32(const arm_unified_thread_state_t *its)
310 {
311 return &its->ts_32;
312 }
313
314 static inline const arm_thread_state64_t*
const_thread_state64(const arm_unified_thread_state_t * its)315 const_thread_state64(const arm_unified_thread_state_t *its)
316 {
317 return &its->ts_64;
318 }
319
320 #if defined(__arm64__)
321
#include <kern/assert.h>
#include <arm64/proc_reg.h>
/* Narrowing cast that asserts the value round-trips through the target type. */
#define CAST_ASSERT_SAFE(type, val) (assert((val) == ((type)(val))), (type)(val))
325
326 /*
327 * GPR context
328 */
329
/* Kernel trap frame for a 32-bit thread: GPRs plus fault/exception info. */
struct arm_saved_state32 {
	uint32_t r[13];     /* General purpose register r0-r12 */
	uint32_t sp;        /* Stack pointer r13 */
	uint32_t lr;        /* Link register r14 */
	uint32_t pc;        /* Program counter r15 */
	uint32_t cpsr;      /* Current program status register */
	uint32_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
};
typedef struct arm_saved_state32 arm_saved_state32_t;
341
/* 32-bit saved state prefixed by a tag word. */
struct arm_saved_state32_tagged {
	uint32_t tag;
	struct arm_saved_state32 state;
};
typedef struct arm_saved_state32_tagged arm_saved_state32_tagged_t;

/* Size of arm_saved_state32 in 32-bit words. */
#define ARM_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state32_t)/sizeof(unsigned int)))
350
/* Kernel trap frame for a 64-bit thread: GPRs plus fault/exception info. */
struct arm_saved_state64 {
	uint64_t x[29];     /* General purpose registers x0-x28 */
	uint64_t fp;        /* Frame pointer x29 */
	uint64_t lr;        /* Link register x30 */
	uint64_t sp;        /* Stack pointer x31 */
	uint64_t pc;        /* Program counter */
	uint32_t cpsr;      /* Current program status register */
	uint32_t reserved;  /* Reserved padding */
	uint64_t far;       /* Virtual fault address */
	uint32_t esr;       /* Exception syndrome register */
	uint32_t exception; /* Exception number */
#if HAS_APPLE_PAC
	/* PAC signature over the critical fields; maintained by
	 * ml_sign_thread_state()/ml_check_signed_state(). */
	uint64_t jophash;
#endif /* HAS_APPLE_PAC */
};
typedef struct arm_saved_state64 arm_saved_state64_t;

/* Size of arm_saved_state64 in 32-bit words. */
#define ARM_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof(arm_saved_state64_t)/sizeof(unsigned int)))
370
/* 64-bit saved state prefixed by a tag word. */
struct arm_saved_state64_tagged {
	uint32_t tag;
	struct arm_saved_state64 state;
};
typedef struct arm_saved_state64_tagged arm_saved_state64_tagged_t;
376
/*
 * Width-agnostic kernel saved state: header flavor (ARM_SAVED_STATE32/64)
 * selects which union member is live.
 */
struct arm_saved_state {
	arm_state_hdr_t ash;
	union {
		struct arm_saved_state32 ss_32;
		struct arm_saved_state64 ss_64;
	} uss;
} __attribute__((aligned(16)));
/* Convenience accessors so callers can write state->ss_32 / state->ss_64. */
#define ss_32 uss.ss_32
#define ss_64 uss.ss_64

typedef struct arm_saved_state arm_saved_state_t;
388
/*
 * Minimal context saved for kernel-to-kernel switches: only the callee-saved
 * registers plus a few PSTATE bits that must survive the switch.
 */
struct arm_kernel_saved_state {
	uint64_t x[10];  /* General purpose registers x19-x28 */
	uint64_t fp;     /* Frame pointer x29 */
	uint64_t lr;     /* Link register x30 */
	uint64_t sp;     /* Stack pointer x31 */
	/* Some things here we DO need to preserve */
	uint8_t pc_was_in_userspace; /* nonzero if the interrupted pc was in user mode */
	uint8_t ssbs;    /* PSTATE.SSBS (speculative store bypass safe) bit */
	uint8_t dit;     /* PSTATE.DIT (data independent timing) bit */
	uint8_t uao;     /* PSTATE.UAO (user access override) bit */
} __attribute__((aligned(16)));

typedef struct arm_kernel_saved_state arm_kernel_saved_state_t;
402
/* Panic: the existing saved CPSR failed user-mode validation. */
extern void ml_panic_on_invalid_old_cpsr(const arm_saved_state_t *) __attribute__((noreturn));

/* Panic: a proposed new CPSR value failed user-mode validation. */
extern void ml_panic_on_invalid_new_cpsr(const arm_saved_state_t *, uint32_t) __attribute__((noreturn));
406
407 #if HAS_APPLE_PAC
408
#include <sys/cdefs.h>

/*
 * Used by MANIPULATE_SIGNED_THREAD_STATE(), potentially from C++ (IOKit) code.
 * Open-coded to prevent a circular dependency between mach/arm/thread_status.h
 * and osfmk/arm/machine_routines.h.
 * Interrupts are disabled around signed-state manipulation and restored after.
 */
__BEGIN_DECLS
extern uint64_t ml_pac_safe_interrupts_disable(void);
extern void ml_pac_safe_interrupts_restore(uint64_t);
__END_DECLS
420
421 /*
422 * Methods used to sign and check thread state to detect corruptions of saved
423 * thread state across exceptions and context switches.
424 */
425 extern void ml_sign_thread_state(arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);
426
427 extern void ml_check_signed_state(const arm_saved_state_t *, uint64_t, uint32_t, uint64_t, uint64_t, uint64_t);
428
/* XXX: including stddef.h here breaks ctfmerge on some builds, so use __builtin_offsetof() instead of offsetof() */
#define ss64_offsetof(x) __builtin_offsetof(struct arm_saved_state, ss_64.x)
431
432 /**
433 * Verify the signed thread state in _iss, execute the assembly instructions
434 * _instr, and re-sign the modified thread state. Varargs specify additional
435 * inputs.
436 *
437 * _instr may read or modify the thread state in the following registers:
438 *
439 * x0: _iss
440 * x1: authed _iss->ss_64.pc
441 * w2: authed _iss->ss_64.cpsr
442 * x3: authed _iss->ss_64.lr
443 * x4: authed _iss->ss_64.x16
444 * x5: authed _iss->ss_64.x17
445 * x6: scratch register
446 * x7: scratch register
447 *
448 * If _instr makes no changes to the thread state, it may skip re-signing by
449 * branching to label 0.
450 */
/*
 * See the block comment above for the register contract. The sequence is:
 * disable interrupts, load the signed fields, verify the signature
 * (_ml_check_signed_state panics on mismatch), run _instr, then re-sign.
 * SPSel is switched to SP_EL1 around the call and restored afterwards; lr is
 * preserved manually in x9 because the bl instructions clobber it.
 */
#define MANIPULATE_SIGNED_THREAD_STATE(_iss, _instr, ...) \
	do { \
	        uint64_t _intr = ml_pac_safe_interrupts_disable(); \
	        asm volatile ( \
	                "mov x9, lr" "\n" \
	                "mov x0, %[iss]" "\n" \
	                "msr SPSel, #1" "\n" \
	                "ldp x4, x5, [x0, %[SS64_X16]]" "\n" \
	                "ldr x7, [x0, %[SS64_PC]]" "\n" \
	                "ldr w8, [x0, %[SS64_CPSR]]" "\n" \
	                "ldr x3, [x0, %[SS64_LR]]" "\n" \
	                "mov x1, x7" "\n" \
	                "mov w2, w8" "\n" \
	                "bl _ml_check_signed_state" "\n" \
	                "mov x1, x7" "\n" \
	                "mov w2, w8" "\n" \
	                _instr "\n" \
	                "bl _ml_sign_thread_state" "\n" \
	                "0:" "\n" \
	                "msr SPSel, #0" "\n" \
	                "mov lr, x9" "\n" \
	                : \
	                : [iss] "r"(_iss), \
	                [SS64_X16] "i"(ss64_offsetof(x[16])), \
	                [SS64_PC] "i"(ss64_offsetof(pc)), \
	                [SS64_CPSR] "i"(ss64_offsetof(cpsr)), \
	                [SS64_LR] "i"(ss64_offsetof(lr)),##__VA_ARGS__ \
	                : "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7", "x8", \
	                "x9", "x16" \
	        ); \
	        ml_pac_safe_interrupts_restore(_intr); \
	} while (0)
483
/*
 * Asm prologue for the *_USER_* variants: checks that the EL field of the
 * verified CPSR (w2) is EL0 and panics via ml_panic_on_invalid_old_cpsr
 * otherwise (x0 already holds the state pointer it expects).
 */
#define VERIFY_USER_THREAD_STATE_INSTR \
	"and w6, w2, %[CPSR_EL_MASK]" "\n" \
	"cmp w6, %[CPSR_EL0]" "\n" \
	"b.eq 1f" "\n" \
	"bl _ml_panic_on_invalid_old_cpsr" "\n" \
	"brk #0" "\n" \
	"1:" "\n"
491
/* Immediate operands consumed by VERIFY_USER_THREAD_STATE_INSTR. */
#define VERIFY_USER_THREAD_STATE_INPUTS \
	[CPSR_EL_MASK] "i"(PSR64_MODE_EL_MASK), \
	[CPSR_EL0] "i"(PSR64_MODE_EL0)
495
/*
 * MANIPULATE_SIGNED_THREAD_STATE plus a panic-on-non-EL0 check of the
 * verified CPSR before _instr runs.
 */
#define MANIPULATE_SIGNED_USER_THREAD_STATE(_iss, _instr, ...) \
	MANIPULATE_SIGNED_THREAD_STATE(_iss, \
	        VERIFY_USER_THREAD_STATE_INSTR \
	        _instr, \
	        VERIFY_USER_THREAD_STATE_INPUTS, ##__VA_ARGS__)
501
/*
 * Verify src's signature (and that it is user/EL0 state), then re-sign with
 * the state pointer redirected to dst, so a copy of src's register values in
 * dst receives a valid signature of its own.
 * NOTE(review): assumes dst already holds a copy of src's register fields —
 * confirm against callers.
 */
static inline void
check_and_sign_copied_user_thread_state(arm_saved_state_t *dst, const arm_saved_state_t *src)
{
	MANIPULATE_SIGNED_USER_THREAD_STATE(src,
	    "mov x0, %[dst]",
	    [dst] "r"(dst)
	    );
}
510 #endif /* HAS_APPLE_PAC */
511
512 static inline boolean_t
is_saved_state32(const arm_saved_state_t * iss)513 is_saved_state32(const arm_saved_state_t *iss)
514 {
515 return iss->ash.flavor == ARM_SAVED_STATE32;
516 }
517
518 static inline boolean_t
is_saved_state64(const arm_saved_state_t * iss)519 is_saved_state64(const arm_saved_state_t *iss)
520 {
521 return iss->ash.flavor == ARM_SAVED_STATE64;
522 }
523
524 static inline arm_saved_state32_t*
saved_state32(arm_saved_state_t * iss)525 saved_state32(arm_saved_state_t *iss)
526 {
527 return &iss->ss_32;
528 }
529
530 static inline const arm_saved_state32_t*
const_saved_state32(const arm_saved_state_t * iss)531 const_saved_state32(const arm_saved_state_t *iss)
532 {
533 return &iss->ss_32;
534 }
535
536 static inline arm_saved_state64_t*
saved_state64(arm_saved_state_t * iss)537 saved_state64(arm_saved_state_t *iss)
538 {
539 return &iss->ss_64;
540 }
541
542 static inline const arm_saved_state64_t*
const_saved_state64(const arm_saved_state_t * iss)543 const_saved_state64(const arm_saved_state_t *iss)
544 {
545 return &iss->ss_64;
546 }
547
548 static inline register_t
get_saved_state_pc(const arm_saved_state_t * iss)549 get_saved_state_pc(const arm_saved_state_t *iss)
550 {
551 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->pc : const_saved_state64(iss)->pc);
552 }
553
#if HAS_ADD_SAVED_STATE_PC
/*
 * Advance the saved pc by a signed byte offset.
 * 32-bit: plain add with an assert-checked narrowing store.
 * 64-bit with PAC: the add runs under MANIPULATE_SIGNED_THREAD_STATE (x1
 * holds the authenticated pc) so the signature is re-derived over the new pc.
 */
static inline void
add_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_ADD_SAVED_STATE_PC */
575
/*
 * Like add_saved_state_pc(), but the PAC path additionally panics (via the
 * user-state CPSR check) unless the saved state is EL0 state.
 */
static inline void
add_user_saved_state_pc(arm_saved_state_t *iss, int diff)
{
	if (is_saved_state32(iss)) {
		uint64_t pc = saved_state32(iss)->pc + (uint32_t)diff;
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[diff]		\n"
		    "add	x1, x1, w6, sxtw	\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [diff] "r"(diff)
		    );
#else
		saved_state64(iss)->pc += (unsigned long)diff;
#endif /* HAS_APPLE_PAC */
	}
}
595
#if HAS_SET_SAVED_STATE_PC
/*
 * Overwrite the saved pc. The 64-bit PAC path updates the value under
 * MANIPULATE_SIGNED_THREAD_STATE so the signature stays valid.
 */
static inline void
set_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_PC */
615
/*
 * Like set_saved_state_pc(), but the PAC path additionally panics unless the
 * saved state is EL0 state.
 */
static inline void
set_user_saved_state_pc(arm_saved_state_t *iss, register_t pc)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->pc = CAST_ASSERT_SAFE(uint32_t, pc);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x1, %[pc]		\n"
		    "str	x1, [x0, %[SS64_PC]]	\n",
		    [pc] "r"(pc)
		    );
#else
		saved_state64(iss)->pc = (unsigned long)pc;
#endif /* HAS_APPLE_PAC */
	}
}
633
634 static inline register_t
get_saved_state_sp(const arm_saved_state_t * iss)635 get_saved_state_sp(const arm_saved_state_t *iss)
636 {
637 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->sp : const_saved_state64(iss)->sp);
638 }
639
640 static inline void
set_saved_state_sp(arm_saved_state_t * iss,register_t sp)641 set_saved_state_sp(arm_saved_state_t *iss, register_t sp)
642 {
643 if (is_saved_state32(iss)) {
644 saved_state32(iss)->sp = CAST_ASSERT_SAFE(uint32_t, sp);
645 } else {
646 saved_state64(iss)->sp = (uint64_t)sp;
647 }
648 }
649
650 static inline register_t
get_saved_state_lr(const arm_saved_state_t * iss)651 get_saved_state_lr(const arm_saved_state_t *iss)
652 {
653 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->lr : const_saved_state64(iss)->lr);
654 }
655
#if HAS_SET_SAVED_STATE_LR
/*
 * Overwrite the saved link register. The 64-bit PAC path updates lr (x3)
 * under MANIPULATE_SIGNED_THREAD_STATE so the signature stays valid.
 */
static inline void
set_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_LR */
675
/*
 * Like set_saved_state_lr(), but the PAC path additionally panics unless the
 * saved state is EL0 state.
 */
static inline void
set_user_saved_state_lr(arm_saved_state_t *iss, register_t lr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->lr = CAST_ASSERT_SAFE(uint32_t, lr);
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	x3, %[lr]		\n"
		    "str	x3, [x0, %[SS64_LR]]	\n",
		    [lr] "r"(lr)
		    );
#else
		saved_state64(iss)->lr = (unsigned long)lr;
#endif /* HAS_APPLE_PAC */
	}
}
693
694 static inline register_t
get_saved_state_fp(const arm_saved_state_t * iss)695 get_saved_state_fp(const arm_saved_state_t *iss)
696 {
697 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->r[7] : const_saved_state64(iss)->fp);
698 }
699
700 static inline void
set_saved_state_fp(arm_saved_state_t * iss,register_t fp)701 set_saved_state_fp(arm_saved_state_t *iss, register_t fp)
702 {
703 if (is_saved_state32(iss)) {
704 saved_state32(iss)->r[7] = CAST_ASSERT_SAFE(uint32_t, fp);
705 } else {
706 saved_state64(iss)->fp = (uint64_t)fp;
707 }
708 }
709
/*
 * Bounds-check a GPR index for get/set_saved_state_reg().
 * NOTE(review): the limits are ARM_SAVED_STATE{32,64}_COUNT — the size of the
 * whole saved-state struct in 32-bit words — not the length of the r[]/x[]
 * arrays, so indices reaching the named fields past the arrays are accepted;
 * confirm callers rely on that before tightening.
 */
static inline int
check_saved_state_reglimit(const arm_saved_state_t *iss, unsigned reg)
{
	return is_saved_state32(iss) ? (reg < ARM_SAVED_STATE32_COUNT) : (reg < ARM_SAVED_STATE64_COUNT);
}
715
/*
 * Read GPR number `reg` from the saved state; returns 0 for out-of-limit
 * indices (see check_saved_state_reglimit() for what "limit" means here).
 */
static inline register_t
get_saved_state_reg(const arm_saved_state_t *iss, unsigned reg)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return 0;
	}

	return (register_t)(is_saved_state32(iss) ? (const_saved_state32(iss)->r[reg]) : (const_saved_state64(iss)->x[reg]));
}
725
#if HAS_SET_SAVED_STATE_REG
/*
 * Write GPR number `reg`; out-of-limit indices are silently ignored.
 * On PAC targets x16/x17 are covered by the thread-state signature
 * (jophash), so those two writes go through MANIPULATE_SIGNED_THREAD_STATE
 * to re-sign; all other registers are stored directly.
 */
static inline void
set_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov	x4, %[value]		\n"
			    "str	x4, [x0, %[SS64_X16]]	\n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_THREAD_STATE(iss,
			    "mov	x5, %[value]		\n"
			    "str	x5, [x0, %[SS64_X17]]	\n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
#endif /* HAS_SET_SAVED_STATE_REG */
760
/*
 * Like set_saved_state_reg(), but the PAC x16/x17 paths additionally panic
 * unless the saved state is EL0 state.
 */
static inline void
set_user_saved_state_reg(arm_saved_state_t *iss, unsigned reg, register_t value)
{
	if (!check_saved_state_reglimit(iss, reg)) {
		return;
	}

	if (is_saved_state32(iss)) {
		saved_state32(iss)->r[reg] = CAST_ASSERT_SAFE(uint32_t, value);
	} else {
#if HAS_APPLE_PAC
		/* x16 and x17 are part of the jophash */
		if (reg == 16) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x4, %[value]		\n"
			    "str	x4, [x0, %[SS64_X16]]	\n",
			    [value] "r"(value)
			    );
			return;
		} else if (reg == 17) {
			MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
			    "mov	x5, %[value]		\n"
			    "str	x5, [x0, %[SS64_X17]]	\n",
			    [value] "r"(value),
			    [SS64_X17] "i"(ss64_offsetof(x[17]))
			    );
			return;
		}
#endif
		saved_state64(iss)->x[reg] = (uint64_t)value;
	}
}
793
794
795 static inline uint32_t
get_saved_state_cpsr(const arm_saved_state_t * iss)796 get_saved_state_cpsr(const arm_saved_state_t *iss)
797 {
798 return is_saved_state32(iss) ? const_saved_state32(iss)->cpsr : const_saved_state64(iss)->cpsr;
799 }
800
#if HAS_MASK_SAVED_STATE_CPSR
/*
 * Apply (cpsr | set_bits) & ~clear_bits to the saved CPSR. The 64-bit PAC
 * path performs the orr/bic on the authenticated cpsr (w2) under
 * MANIPULATE_SIGNED_THREAD_STATE so the signature stays valid.
 */
static inline void
mask_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr |= set_bits;
		saved_state32(iss)->cpsr &= ~clear_bits;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]	\n"
		    "orr	w2, w2, w6, lsl #0	\n"
		    "mov	w6, %w[clear_bits]	\n"
		    "bic	w2, w2, w6, lsl #0	\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		saved_state64(iss)->cpsr |= set_bits;
		saved_state64(iss)->cpsr &= ~clear_bits;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_MASK_SAVED_STATE_CPSR */
826
/*
 * Like mask_saved_state_cpsr(), but panics (ml_panic_on_invalid_new_cpsr)
 * if the resulting CPSR would not be user-mode: the 32-bit path checks
 * PSR_IS_USER, the PAC asm path compares the EL field against EL0 inline,
 * and the non-PAC 64-bit path checks PSR64_IS_USER.
 */
static inline void
mask_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t set_bits, uint32_t clear_bits)
{
	if (is_saved_state32(iss)) {
		uint32_t new_cpsr = saved_state32(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state32(iss)->cpsr = new_cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w6, %w[set_bits]	\n"
		    "orr	w2, w2, w6, lsl #0	\n"
		    "mov	w6, %w[clear_bits]	\n"
		    "bic	w2, w2, w6, lsl #0	\n"
		    "and	w6, w2, %[CPSR_EL_MASK]	\n"
		    "cmp	w6, %[CPSR_EL0]		\n"
		    "b.eq	1f			\n"
		    "mov	w1, w2			\n"
		    "bl	_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0			\n"
		    "1:					\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [set_bits] "r"(set_bits),
		    [clear_bits] "r"(clear_bits)
		    );
#else
		uint32_t new_cpsr = saved_state64(iss)->cpsr;
		new_cpsr |= set_bits;
		new_cpsr &= ~clear_bits;
		if (!PSR64_IS_USER(new_cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, new_cpsr);
		}
		saved_state64(iss)->cpsr = new_cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
867
#if HAS_SET_SAVED_STATE_CPSR
/*
 * Overwrite the saved CPSR without user-mode validation. The 64-bit PAC
 * path stores under MANIPULATE_SIGNED_THREAD_STATE to keep the signature
 * valid.
 */
static inline void
set_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]		\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
#endif /* HAS_SET_SAVED_STATE_CPSR */
887
/*
 * Overwrite the saved CPSR, panicking (ml_panic_on_invalid_new_cpsr) unless
 * the new value is a user-mode CPSR; validation mirrors
 * mask_user_saved_state_cpsr().
 */
static inline void
set_user_saved_state_cpsr(arm_saved_state_t *iss, uint32_t cpsr)
{
	if (is_saved_state32(iss)) {
		if (!PSR_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state32(iss)->cpsr = cpsr;
	} else {
#if HAS_APPLE_PAC
		MANIPULATE_SIGNED_USER_THREAD_STATE(iss,
		    "mov	w2, %w[cpsr]		\n"
		    "and	w6, w2, %[CPSR_EL_MASK]	\n"
		    "cmp	w6, %[CPSR_EL0]		\n"
		    "b.eq	1f			\n"
		    "mov	w1, w2			\n"
		    "bl	_ml_panic_on_invalid_new_cpsr	\n"
		    "brk	#0			\n"
		    "1:					\n"
		    "str	w2, [x0, %[SS64_CPSR]]	\n",
		    [cpsr] "r"(cpsr)
		    );
#else
		if (!PSR64_IS_USER(cpsr)) {
			ml_panic_on_invalid_new_cpsr(iss, cpsr);
		}
		saved_state64(iss)->cpsr = cpsr;
#endif /* HAS_APPLE_PAC */
	}
}
918
919 static inline register_t
get_saved_state_far(const arm_saved_state_t * iss)920 get_saved_state_far(const arm_saved_state_t *iss)
921 {
922 return (register_t)(is_saved_state32(iss) ? const_saved_state32(iss)->far : const_saved_state64(iss)->far);
923 }
924
925 static inline void
set_saved_state_far(arm_saved_state_t * iss,register_t far)926 set_saved_state_far(arm_saved_state_t *iss, register_t far)
927 {
928 if (is_saved_state32(iss)) {
929 saved_state32(iss)->far = CAST_ASSERT_SAFE(uint32_t, far);
930 } else {
931 saved_state64(iss)->far = (uint64_t)far;
932 }
933 }
934
935 static inline uint32_t
get_saved_state_esr(const arm_saved_state_t * iss)936 get_saved_state_esr(const arm_saved_state_t *iss)
937 {
938 return is_saved_state32(iss) ? const_saved_state32(iss)->esr : const_saved_state64(iss)->esr;
939 }
940
941 static inline void
set_saved_state_esr(arm_saved_state_t * iss,uint32_t esr)942 set_saved_state_esr(arm_saved_state_t *iss, uint32_t esr)
943 {
944 if (is_saved_state32(iss)) {
945 saved_state32(iss)->esr = esr;
946 } else {
947 saved_state64(iss)->esr = esr;
948 }
949 }
950
951 static inline uint32_t
get_saved_state_exc(const arm_saved_state_t * iss)952 get_saved_state_exc(const arm_saved_state_t *iss)
953 {
954 return is_saved_state32(iss) ? const_saved_state32(iss)->exception : const_saved_state64(iss)->exception;
955 }
956
957 static inline void
set_saved_state_exc(arm_saved_state_t * iss,uint32_t exc)958 set_saved_state_exc(arm_saved_state_t *iss, uint32_t exc)
959 {
960 if (is_saved_state32(iss)) {
961 saved_state32(iss)->exception = exc;
962 } else {
963 saved_state64(iss)->exception = exc;
964 }
965 }
966
/* Defined in machine-dependent code; presumably panics to flag an
 * unimplemented code path -- NOTE(review): behavior inferred from the
 * name, confirm against the definition. */
extern void panic_unimplemented(void);
968
969 /**
970 * Extracts the SVC (Supervisor Call) number from the appropriate GPR (General
971 * Purpose Register).
972 *
973 * @param[in] iss the 32-bit or 64-bit ARM saved state (i.e. trap frame).
974 *
975 * @return The SVC number.
976 */
977 static inline int
get_saved_state_svc_number(const arm_saved_state_t * iss)978 get_saved_state_svc_number(const arm_saved_state_t *iss)
979 {
980 return is_saved_state32(iss) ? (int)const_saved_state32(iss)->r[12] : (int)const_saved_state64(iss)->x[ARM64_SYSCALL_CODE_REG_NUM]; /* Only first word counts here */
981 }
982
/* Legacy (pre-armv8) debug register layout, from <mach/machine/_structs.h>. */
typedef _STRUCT_ARM_LEGACY_DEBUG_STATE arm_legacy_debug_state_t;

/*
 * Kernel-internal aggregate debug state: the header's flavor field
 * discriminates which union member (32-bit or 64-bit layout) is valid.
 */
struct arm_debug_aggregate_state {
	arm_state_hdr_t dsh;            /* flavor/count header selecting uds member */
	union {
		arm_debug_state32_t ds32;
		arm_debug_state64_t ds64;
	} uds;
	os_refcnt_t ref;                /* reference count (os_refcnt) on this state */
} __attribute__((aligned(16)));

typedef struct arm_debug_aggregate_state arm_debug_state_t;

/* Size of the legacy debug state in 32-bit words, for Mach msg counts. */
#define ARM_LEGACY_DEBUG_STATE_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_legacy_debug_state_t)/sizeof(uint32_t)))
998
999 /*
1000 * NEON context
1001 */
1002 typedef __uint128_t uint128_t;
1003 typedef uint64_t uint64x2_t __attribute__((ext_vector_type(2)));
1004 typedef uint32_t uint32x4_t __attribute__((ext_vector_type(4)));
1005
1006 struct arm_neon_saved_state32 {
1007 union {
1008 uint128_t q[16];
1009 uint64_t d[32];
1010 uint32_t s[32];
1011 } v;
1012 uint32_t fpsr;
1013 uint32_t fpcr;
1014 };
1015 typedef struct arm_neon_saved_state32 arm_neon_saved_state32_t;
1016
1017 #define ARM_NEON_SAVED_STATE32_COUNT ((mach_msg_type_number_t) \
1018 (sizeof (arm_neon_saved_state32_t)/sizeof(unsigned int)))
1019
/*
 * AArch64 NEON register file: 32 V registers, overlaid in the union as
 * 128-bit scalars, 2x64-bit vectors, or 4x32-bit vectors, followed by
 * the floating-point status and control registers.
 */
struct arm_neon_saved_state64 {
	union {
		uint128_t q[32];
		uint64x2_t d[32];
		uint32x4_t s[32];
	} v;
	uint32_t fpsr;          /* floating-point status register */
	uint32_t fpcr;          /* floating-point control register */
};
typedef struct arm_neon_saved_state64 arm_neon_saved_state64_t;

/* Size of the 64-bit NEON state in 32-bit words, for Mach msg counts. */
#define ARM_NEON_SAVED_STATE64_COUNT ((mach_msg_type_number_t) \
	(sizeof (arm_neon_saved_state64_t)/sizeof(unsigned int)))
1033
/*
 * Flavor-discriminated NEON state: the header's flavor field selects
 * between the 32-bit and 64-bit layouts in the union.
 */
struct arm_neon_saved_state {
	arm_state_hdr_t nsh;    /* flavor/count header selecting uns member */
	union {
		struct arm_neon_saved_state32 ns_32;
		struct arm_neon_saved_state64 ns_64;
	} uns;
};
typedef struct arm_neon_saved_state arm_neon_saved_state_t;
/* Shorthand for the union members (note: these are object-like macros). */
#define ns_32 uns.ns_32
#define ns_64 uns.ns_64

/*
 * Reduced NEON state kept for kernel threads: eight 64-bit D registers
 * plus FPCR -- NOTE(review): which architectural registers the d[8]
 * array holds is not shown here; confirm against the save/restore code.
 */
struct arm_kernel_neon_saved_state {
	uint64_t d[8];
	uint32_t fpcr;
};
typedef struct arm_kernel_neon_saved_state arm_kernel_neon_saved_state_t;
1050
1051 static inline boolean_t
is_neon_saved_state32(const arm_neon_saved_state_t * state)1052 is_neon_saved_state32(const arm_neon_saved_state_t *state)
1053 {
1054 return state->nsh.flavor == ARM_NEON_SAVED_STATE32;
1055 }
1056
1057 static inline boolean_t
is_neon_saved_state64(const arm_neon_saved_state_t * state)1058 is_neon_saved_state64(const arm_neon_saved_state_t *state)
1059 {
1060 return state->nsh.flavor == ARM_NEON_SAVED_STATE64;
1061 }
1062
/* Returns the 32-bit view of the NEON union; caller must have checked
 * the flavor (see is_neon_saved_state32). ns_32 is an object-like macro
 * expanding to uns.ns_32. */
static inline arm_neon_saved_state32_t *
neon_state32(arm_neon_saved_state_t *state)
{
	return &state->ns_32;
}
1068
/* Returns the 64-bit view of the NEON union; caller must have checked
 * the flavor (see is_neon_saved_state64). ns_64 is an object-like macro
 * expanding to uns.ns_64. */
static inline arm_neon_saved_state64_t *
neon_state64(arm_neon_saved_state_t *state)
{
	return &state->ns_64;
}
1074
1075
1076
1077 /*
1078 * Aggregated context
1079 */
1080
1081 struct arm_context {
1082 struct arm_saved_state ss;
1083 struct arm_neon_saved_state ns;
1084 };
1085 typedef struct arm_context arm_context_t;
1086
1087 struct arm_kernel_context {
1088 struct arm_kernel_saved_state ss;
1089 struct arm_kernel_neon_saved_state ns;
1090 };
1091 typedef struct arm_kernel_context arm_kernel_context_t;
1092
1093 extern void saved_state_to_thread_state64(const arm_saved_state_t*, arm_thread_state64_t*);
1094 extern void thread_state64_to_saved_state(const arm_thread_state64_t*, arm_saved_state_t*);
1095
1096 #else /* defined(__arm64__) */
1097 #error Unknown arch
1098 #endif /* defined(__arm64__) */
1099
/* Conversion between the saved state (trap frame) and the exported
 * Mach arm_thread_state32_t representation. */
extern void saved_state_to_thread_state32(const arm_saved_state_t*, arm_thread_state32_t*);
extern void thread_state32_to_saved_state(const arm_thread_state32_t*, arm_saved_state_t*);
1102
1103 #endif /* XNU_KERNEL_PRIVATE */
1104
1105 #endif /* defined (__arm__) || defined (__arm64__) */
1106
1107 #endif /* _ARM_THREAD_STATUS_H_ */
1108