/*
 * Copyright (c) 2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 *
 */

#ifndef ARM_CPU_DATA
#define ARM_CPU_DATA

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>
#include <kern/assert.h>
#include <kern/kern_types.h>
#include <kern/processor.h>
#include <pexpert/pexpert.h>
#include <arm/thread.h>
#include <arm64/proc_reg.h>

#include <mach/mach_types.h>
#include <machine/thread.h>

__ASSUME_PTR_ABI_SINGLE_BEGIN

#ifndef __BUILDING_XNU_LIB_UNITTEST__
static inline __attribute__((const)) thread_t
current_thread_fast(void)
{
#if defined(__arm64__)
	/*
	 * rdar://73762648: clang nowadays insists that
	 *
	 *     __builtin_arm_rsr64("TPIDR_EL1")
	 *
	 * is not constant and ignores the const attribute, so read the
	 * register the "dumb" way with inline asm.
	 */
	unsigned long result;
	__asm__ ("mrs %0, TPIDR_EL1" : "=r" (result));
	return __unsafe_forge_single(thread_t, result);
#else
	// TPIDRPRW holds the current thread pointer on arm32.
	return __unsafe_forge_single(thread_t, __builtin_arm_mrc(15, 0, 13, 0, 4));
#endif
}
#else /* __BUILDING_XNU_LIB_UNITTEST__ */
__attribute__((const)) thread_t current_thread_fast(void);
#endif /* __BUILDING_XNU_LIB_UNITTEST__ */
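
/*
 * Illustrative sketch (not compiled): because current_thread_fast() is
 * declared __attribute__((const)), the compiler is free to fold repeated
 * calls into a single TPIDR_EL1 read. The function below is hypothetical
 * and exists only to demonstrate that property.
 */
#if 0
static void
current_thread_cse_sketch(void)
{
	thread_t a = current_thread_fast();
	thread_t b = current_thread_fast(); // may be CSE'd with the read above
	assert(a == b);                     // holds: the thread pointer only
	                                    // changes at a context switch
}
#endif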

/*
 * The "volatile" flavor of current_thread() is intended for use by
 * scheduler code which may need to update the thread pointer in the
 * course of a context switch. Any call to current_thread() made
 * prior to the thread pointer update should be safe to optimize away,
 * as it should be consistent with that thread's state to the extent
 * the compiler can reason about it. Likewise, the context switch
 * path will eventually result in an arbitrary branch to the new
 * thread's pc, about which the compiler cannot reason, so any
 * compile-time optimization of current_thread() calls made within
 * the new thread should be safely encapsulated in its register/stack
 * state. The volatile form therefore exists to cover the window
 * between the thread pointer update and the branch to the new pc.
 */
static inline thread_t
current_thread_volatile(void)
{
	/*
	 * The compiler might decide to treat rsr64 as const (this comes and
	 * goes between compiler releases), which would allow it to eliminate
	 * redundant calls; we don't want that here. We therefore use volatile
	 * asm, which gives us control over the semantics.
	 *
	 * The mrc builtin used for arm32 is, however, already treated as
	 * volatile.
	 */
#if defined(__arm64__)
	unsigned long result;
	__asm__ volatile ("mrs %0, TPIDR_EL1" : "=r" (result));
	return __unsafe_forge_single(thread_t, result);
#else
	// TPIDRPRW holds the current thread pointer on arm32.
	return __unsafe_forge_single(thread_t, __builtin_arm_mrc(15, 0, 13, 0, 4));
#endif
}

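/*
 * Illustrative sketch (not compiled): how the volatile flavor behaves
 * across the thread pointer update in a context switch. The call to
 * machine_set_current_thread() stands in for whatever routine writes
 * TPIDR_EL1; the sketch assumes such a routine and is hypothetical.
 */
#if 0
static void
context_switch_window_sketch(thread_t new_thread)
{
	thread_t old = current_thread_volatile(); // forced re-read of TPIDR_EL1

	machine_set_current_thread(new_thread);   // thread pointer update

	/*
	 * A current_thread_fast() call here could legally be folded into the
	 * pre-update value; the volatile form always re-reads the register.
	 */
	assert(current_thread_volatile() == new_thread);
	(void)old;
}
#endif
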
#if defined(__arm64__)

/*
 * Returns the EL1 exception stack pointer (SP_EL1). The caller is
 * assumed to be running on SP_EL0; we briefly select SP_EL1 via
 * SPSel, copy it out, and switch back.
 */
static inline vm_offset_t
exception_stack_pointer(void)
{
	vm_offset_t result = 0;
	__asm__ volatile (
		"msr SPSel, #1  \n"     // select SP_EL1
		"mov %0, sp     \n"     // read the exception stack pointer
		"msr SPSel, #0  \n"     // switch back to SP_EL0
		: "=r" (result));

	return result;
}

#endif /* defined(__arm64__) */

/* Per-CPU data for the CPU the current thread is running on. */
#define getCpuDatap()            current_thread()->machine.CpuDatap
#define current_cpu_datap()      getCpuDatap()

extern int          get_preemption_level(void);
extern unsigned int get_preemption_level_for_thread(thread_t);

#define mp_disable_preemption()  _disable_preemption()
#define mp_enable_preemption()   _enable_preemption()
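
/*
 * Illustrative sketch (not compiled): getCpuDatap() follows the current
 * thread's notion of its CPU, so the result is only stable while
 * preemption is disabled; otherwise the thread may migrate mid-access.
 * Hypothetical usage shown under that assumption.
 */
#if 0
static void
percpu_access_sketch(void)
{
	mp_disable_preemption();
	cpu_data_t *cdp = getCpuDatap(); // stable while preemption is off
	/* ... operate on cdp ... */
	mp_enable_preemption();
}
#endif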

__ASSUME_PTR_ABI_SINGLE_END

#endif /* MACH_KERNEL_PRIVATE */

#endif /* ARM_CPU_DATA */