1 /*
2 * Copyright (c) 2007-2017 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifndef _ARM_LOCKS_H_
30 #define _ARM_LOCKS_H_
31
32 #include <kern/kern_types.h>
33 #ifdef MACH_KERNEL_PRIVATE
34 #include <kern/sched_hygiene.h>
35 #include <kern/startup.h>
36 #include <arm/hw_lock_types.h>
37 #endif
38
39 #ifdef MACH_KERNEL_PRIVATE
/*
 * LOCKS_INDIRECT_ALLOW: whether "indirect" mutexes (lck_mtx_ptr pointing
 * at a lck_mtx_ext_t) are supported; only on DEBUG/DEVELOPMENT kernels.
 */
#if DEBUG || DEVELOPMENT
#define LOCKS_INDIRECT_ALLOW 1
#else
#define LOCKS_INDIRECT_ALLOW 0
#endif
45
/* Low bits: requests/overrides applied to the default lock attribute */
#define enaLkDeb 0x00000001 /* Request debug in default attribute */
#define enaLkStat 0x00000002 /* Request statistic in default attribute */
#define disLkRWPrio 0x00000004 /* Disable RW lock priority promotion */
#define enaLkTimeStat 0x00000008 /* Request time statistics in default attribute */
#define disLkRWDebug 0x00000010 /* Disable RW lock best-effort debugging */

/* High bits: per-feature toggles (mask form and bit-index form) */
#define disLkType 0x80000000 /* Disable type checking */
#define disLktypeb 0
#define disLkThread 0x40000000 /* Disable ownership checking */
#define disLkThreadb 1
#define enaLkExtStck 0x20000000 /* Enable extended backtrace */
#define enaLkExtStckb 2
#define disLkMyLck 0x10000000 /* Disable recursive lock detection */
#define disLkMyLckb 3
60
61 #endif
62
63 #ifdef MACH_KERNEL_PRIVATE
/*
 * Kernel-private spin lock: the hardware lock word plus a type word
 * used for consistency checking (see LCK_SPIN_TYPE below).
 */
typedef struct {
	struct hslock hwlock;
	uintptr_t type __kernel_data_semantics;
} lck_spin_t;
68
/* Accessor shorthand for the raw hardware lock word */
#define lck_spin_data hwlock.lock_data

#define LCK_SPIN_TAG_DESTROYED 0xdead /* lock marked as Destroyed */

/* Value stored in lck_spin_t.type for a valid spin lock */
#define LCK_SPIN_TYPE 0x00000011
74
75 #else
76 #ifdef KERNEL_PRIVATE
77
/*
 * KERNEL_PRIVATE (non-MACH) view: the spin lock is opaque, sized as two
 * pointer-width words — presumably matching the private layout; verify
 * against the MACH_KERNEL_PRIVATE definition if either changes.
 */
typedef struct {
	uintptr_t opaque[2] __kernel_data_semantics;
} lck_spin_t;
81
82 #else
83 typedef struct __lck_spin_t__ lck_spin_t;
84 #endif // KERNEL_PRIVATE
85 #endif // MACH_KERNEL_PRIVATE
86
87 #ifdef MACH_KERNEL_PRIVATE
/*
 * Kernel-private mutex layout.
 * Total size: arm: 8 bytes, arm64: 16 bytes (see per-field notes).
 */
typedef struct _lck_mtx_ {
	/*
	 * The mtx_data which holds a thread_t can be "data semantics"
	 * because any dereference of it that leads to mutation
	 * will zone_id_require() that it is indeed a proper thread
	 * from the thread zone.
	 *
	 * This allows us to leave pure data with a lock into
	 * the kalloc data heap.
	 */
	union {
		uintptr_t lck_mtx_data __kernel_data_semantics; /* Thread pointer plus lock bits */
		uintptr_t lck_mtx_tag __kernel_data_semantics; /* Tag for type */
	}; /* arm: 4 arm64: 8 */
	union {
		struct {
			uint16_t lck_mtx_waiters;/* Number of waiters */
			uint8_t lck_mtx_pri; /* unused */
			uint8_t lck_mtx_type; /* Type */
		};
#if LOCKS_INDIRECT_ALLOW
		struct {
			/* Marked as data as it is only dereferenced under LCK_ATTR_DEBUG */
			struct _lck_mtx_ext_ *lck_mtx_ptr __kernel_data_semantics; /* Indirect pointer */
		};
#endif /* LOCKS_INDIRECT_ALLOW */
	}; /* arm: 4 arm64: 8 */
} lck_mtx_t; /* arm: 8 arm64: 16 */
116
/* Shared between mutex and read-write locks */
#define LCK_ILOCK_BIT 0 /* interlock bit position in the lock word */
#define ARM_LCK_WAITERS_BIT 1 /* "has waiters" bit position */
#define LCK_ILOCK (1 << LCK_ILOCK_BIT)
#define ARM_LCK_WAITERS (1 << ARM_LCK_WAITERS_BIT)

#define LCK_MTX_TYPE 0x22 /* lock type */

#if LOCKS_INDIRECT_ALLOW
#define LCK_MTX_TAG_INDIRECT 0x00001007 /* lock marked as Indirect */
#endif /* LOCKS_INDIRECT_ALLOW */
#define LCK_MTX_TAG_DESTROYED 0x00002007 /* lock marked as Destroyed */

/* Depth of the backtrace recorded in lck_mtx_deb_t.stack */
#define LCK_FRAMES_MAX 8
131
132 extern machine_timeout32_t MutexSpin;
133 extern uint64_t low_MutexSpin;
134 extern int64_t high_MutexSpin;
135
/*
 * Per-mutex debug record: a type tag, a captured backtrace of up to
 * LCK_FRAMES_MAX frames, and a thread (presumably the holder at capture
 * time — confirm against the lock debug code that fills this in).
 */
typedef struct {
	unsigned int type;
	vm_offset_t stack[LCK_FRAMES_MAX];
	vm_offset_t thread;
} lck_mtx_deb_t;
141
#define MUTEX_TAG 0x4d4d /* 'MM' in ASCII */

/* Per-mutex statistics storage */
typedef struct {
	unsigned int lck_mtx_stat_data;
} lck_mtx_stat_t;
147
/*
 * Extended mutex: the mutex itself plus its group, attributes, and
 * debug/statistics state. Reached via lck_mtx_ptr when the lock is
 * tagged LCK_MTX_TAG_INDIRECT (LOCKS_INDIRECT_ALLOW kernels only).
 */
typedef struct _lck_mtx_ext_ {
	lck_mtx_t lck_mtx; /* arm: 12 arm64: 24 */
	struct _lck_grp_ *lck_mtx_grp; /* arm: 4 arm64: 8 */
	unsigned int lck_mtx_attr; /* arm: 4 arm64: 4 */
	lck_mtx_stat_t lck_mtx_stat; /* arm: 4 arm64: 4 */
	lck_mtx_deb_t lck_mtx_deb; /* arm: 40 arm64: 80 */
} lck_mtx_ext_t; /* arm: 64 arm64: 120 */
155
/* lck_mtx_attr flags (mask form and bit-index form) */
#define LCK_MTX_ATTR_DEBUG 0x1
#define LCK_MTX_ATTR_DEBUGb 31
#define LCK_MTX_ATTR_STAT 0x2
#define LCK_MTX_ATTR_STATb 30

/*
 * Map a mutex to the wait event its waiters block on, and back.
 * The event address is that of the last unsigned int inside lck_mtx_t.
 */
#define LCK_MTX_EVENT(lck) ((event_t)(((unsigned int*)(lck))+((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))))
#define LCK_EVENT_TO_MUTEX(event) ((lck_mtx_t *)(uintptr_t)(((unsigned int *)(event)) - ((sizeof(lck_mtx_t)-1)/sizeof(unsigned int))))
163
164 #else
165 #ifdef KERNEL_PRIVATE
/* Opaque KERNEL_PRIVATE view of a mutex: two pointer-width words */
typedef struct {
	uintptr_t opaque[2] __kernel_data_semantics;
} lck_mtx_t;

/* Opaque view of the extended mutex: 16 native words */
typedef struct {
#if defined(__arm64__)
	unsigned long opaque[16];
#else /* __arm__ */
	unsigned int opaque[16];
#endif
} lck_mtx_ext_t;
177
178 #else
179 typedef struct __lck_mtx_t__ lck_mtx_t;
180 #endif
181 #endif
182
183 #ifdef MACH_KERNEL_PRIVATE
184
/* Low-power wait hint used while spinning */
#define wait_for_event() __builtin_arm_wfe()
#if __arm__
/* Publish prior stores (DSB ISHST) before waking waiters with SEV */
#define set_event() do{__builtin_arm_dsb(DSB_ISHST);__builtin_arm_sev();}while(0)
#define LOCK_SNOOP_SPINS 4
#else
#define set_event() do{}while(0) // arm64 sev is implicit in stlxr
#define LOCK_SNOOP_SPINS 0x300
#endif
193
194 #if LOCK_PRIVATE
195
/* Timeout after which a stuck lock panics the system */
extern machine_timeout32_t lock_panic_timeout;

/* Platform alias for the shared interlock bit */
#define PLATFORM_LCK_ILOCK LCK_ILOCK

#if defined(__ARM_ARCH_8_2__)
#define __ARM_ATOMICS_8_1 1 // ARMv8.1 atomic instructions are available
#endif
203
204 /*
205 * Lock state to thread pointer
206 * Clear the bottom bits
207 */
208 #define LCK_MTX_STATE_TO_THREAD(s) (thread_t)(s & ~(LCK_ILOCK | ARM_LCK_WAITERS))
209 /*
210 * Thread pointer to lock state
211 * arm thread pointers are aligned such that the bottom two bits are clear
212 */
213 #define LCK_MTX_THREAD_TO_STATE(t) ((uintptr_t)t)
214 /*
215 * Thread pointer mask
216 */
217 #define LCK_MTX_THREAD_MASK (~(uintptr_t)(LCK_ILOCK | ARM_LCK_WAITERS))
218
219 #if SCHED_PREEMPTION_DISABLE_DEBUG
220
/*
 * Increment the thread's preemption-disable count (debug flavor).
 * On the 0 -> 1 transition, if the debug mode is enabled, prepare a
 * preemption-disable measurement for this thread.
 *
 * No trailing semicolon after while (0): the caller supplies it, which
 * keeps the macro safe inside if/else bodies (the original semicolon
 * defeated the do-while(0) idiom).
 */
#define lock_disable_preemption_for_thread(t) \
	do { \
	        unsigned int const count = (t)->machine.preemption_count; \
	        os_atomic_store(&((t)->machine.preemption_count), (t)->machine.preemption_count + 1, compiler_acq_rel); \
	        \
	        if (count == 0 && sched_preemption_disable_debug_mode) { \
	                _prepare_preemption_disable_measurement(t); \
	        } \
	} while (0)
230
231 #else /* SCHED_PREEMPTION_DISABLE_DEBUG */
232
/* Increment the thread's preemption-disable count (no debug hooks) */
#define lock_disable_preemption_for_thread(t) \
	os_atomic_store(&(t->machine.preemption_count), t->machine.preemption_count + 1, compiler_acq_rel)
235
236 #endif /* SCHED_PREEMPTION_DISABLE_DEBUG */
237
/* Re-enable preemption / query the per-thread disable count */
#define lock_enable_preemption enable_preemption
#define lock_preemption_disabled_for_thread(t) (t->machine.preemption_count > 0)
240
241 __unused static void
disable_interrupts_noread(void)242 disable_interrupts_noread(void)
243 {
244 #if __arm__
245 __asm__ volatile ("cpsid if" ::: "memory"); // Mask IRQ FIQ
246 #else
247 __builtin_arm_wsr64("DAIFSet", DAIFSC_STANDARD_DISABLE); // Mask IRQ FIQ ASYNCF
248 #endif
249 }
250
251 __unused static inline long
get_interrupts(void)252 get_interrupts(void)
253 {
254 long state;
255
256 #if __arm__
257 __asm__ volatile ("mrs %[state], cpsr" :[state] "=r" (state)); // Read cpsr
258 #else
259 state = (long)__builtin_arm_rsr64("DAIF"); // Read interrupt state
260 #endif
261 return state;
262 }
263
264 __unused static inline long
disable_interrupts(void)265 disable_interrupts(void)
266 {
267 long state;
268
269 state = get_interrupts(); // Get previous state
270 disable_interrupts_noread(); // Disable
271 return state;
272 }
273
/*
 * Restore a previously saved interrupt-mask state, as returned by
 * get_interrupts() or disable_interrupts().
 */
__unused static inline void
restore_interrupts(long state)
{
#if __arm__
	__asm__ volatile ("msr cpsr, %[state]" :: [state] "r" (state) : "cc", "memory"); // Restore CPSR
#elif __arm64__
	__builtin_arm_wsr64("DAIF", (uint64_t)state); // Restore masks
#endif
}
283
284 #endif // LOCK_PRIVATE
285
286 #endif // MACH_KERNEL_PRIVATE
287
288 #endif /* _ARM_LOCKS_H_ */
289