xref: /xnu-8792.61.2/osfmk/kern/simple_lock.h (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (C) 1998 Apple Computer
30  * All Rights Reserved
31  */
32 /*
33  * @OSF_COPYRIGHT@
34  */
35 /*
36  * Mach Operating System
37  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
38  * All Rights Reserved.
39  *
40  * Permission to use, copy, modify and distribute this software and its
41  * documentation is hereby granted, provided that both the copyright
42  * notice and this permission notice appear in all copies of the
43  * software, derivative works or modified versions, and any portions
44  * thereof, and that both notices appear in supporting documentation.
45  *
46  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
47  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
48  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
49  *
50  * Carnegie Mellon requests users of this software to return to
51  *
52  *  Software Distribution Coordinator  or  [email protected]
53  *  School of Computer Science
54  *  Carnegie Mellon University
55  *  Pittsburgh PA 15213-3890
56  *
57  * any improvements or extensions that they make and grant Carnegie Mellon
58  * the rights to redistribute these changes.
59  */
60 /*
61  *	File:	kern/simple_lock.h (derived from kern/lock.h)
62  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
63  *	Date:	1985
64  *
65  *	Atomic primitives and Simple Locking primitives definitions
66  */
67 
68 #ifdef  KERNEL_PRIVATE
69 
70 #ifndef _KERN_SIMPLE_LOCK_H_
71 #define _KERN_SIMPLE_LOCK_H_
72 
73 #include <mach/boolean.h>
74 #include <kern/lock_types.h>
75 #include <kern/lock_group.h>
76 #include <machine/simple_lock.h>
77 
78 #ifdef XNU_KERNEL_PRIVATE
79 
80 #if MACH_KERNEL_PRIVATE
81 #include <machine/atomic.h>
82 #include <mach_ldebug.h>
83 #endif
84 
85 __BEGIN_DECLS
86 
87 #pragma GCC visibility push(hidden)
88 
89 #ifdef MACH_KERNEL_PRIVATE
90 
/*
 * Convert between the raw uintptr_t hw_lock state word and a thread_t.
 * NOTE(review): the state word appears to encode the owning thread's
 * pointer directly — confirm against the hw_lock implementation.
 */
#define HW_LOCK_STATE_TO_THREAD(state)  ((thread_t)(state))
#define HW_LOCK_THREAD_TO_STATE(thread) ((uintptr_t)(thread))
93 
/*
 * Low-level hardware spin-lock entry points.
 *
 * LCK_GRP_ARG() expands to a trailing lck_grp_t * parameter only when
 * lock-group accounting is compiled in (LCK_GRP_USE_ARG); otherwise the
 * group argument is swallowed by the remapping macros further below.
 *
 * NOTE(review): per convention the _nopreempt variants leave preemption
 * management to the caller — confirm in the implementation.
 */

/* Initialize a hw_lock to the unlocked state. */
extern void                     hw_lock_init(
	hw_lock_t);

/* Acquire the lock, spinning until it becomes available. */
extern void                     hw_lock_lock(
	hw_lock_t
	LCK_GRP_ARG(lck_grp_t*));

/* Acquire the lock; caller is responsible for preemption state. */
extern void                     hw_lock_lock_nopreempt(
	hw_lock_t
	LCK_GRP_ARG(lck_grp_t*));

/*
 * Acquire with a bounded spin governed by the given hw_spin_policy_t.
 * Result must be checked; NOTE(review): a zero return presumably means
 * the spin deadline expired without acquiring — confirm.
 */
extern unsigned int             hw_lock_to(
	hw_lock_t,
	hw_spin_policy_t
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

/* Bounded-spin acquire without touching preemption. */
extern unsigned int             hw_lock_to_nopreempt(
	hw_lock_t,
	hw_spin_policy_t
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

/* One-shot acquisition attempt; nonzero on success, no spinning. */
extern unsigned int             hw_lock_try(
	hw_lock_t
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

/* One-shot attempt without touching preemption. */
extern unsigned int             hw_lock_try_nopreempt(
	hw_lock_t
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;
122 
#if !LCK_GRP_USE_ARG
/*
 * Lock-group accounting is compiled out: remap each entry point through
 * a function-like macro of the same name that simply drops the `grp`
 * argument.  (The macro only expands at call sites invoked with the
 * macro's arity; the true function declarations above are unaffected.)
 */
#define hw_lock_lock(lck, grp) \
	hw_lock_lock(lck)

#define hw_lock_lock_nopreempt(lck, grp) \
	hw_lock_lock_nopreempt(lck)

#define hw_lock_to(lck, spec, grp) \
	hw_lock_to(lck, spec)

#define hw_lock_to_nopreempt(lck, spec, grp) \
	hw_lock_to_nopreempt(lck, spec)

#define hw_lock_try(lck, grp) \
	hw_lock_try(lck)

#define hw_lock_try_nopreempt(lck, grp) \
	hw_lock_try_nopreempt(lck)
#endif /* !LCK_GRP_USE_ARG */
142 
/* Release a lock taken with hw_lock_lock()/hw_lock_to()/hw_lock_try(). */
extern void                     hw_lock_unlock(
	hw_lock_t);

/* Release without touching preemption (pairs with *_nopreempt acquires). */
extern void                     hw_lock_unlock_nopreempt(
	hw_lock_t);

/* Nonzero if the lock is currently held (by anyone). */
extern unsigned int             hw_lock_held(
	hw_lock_t) __result_use_check;

/*
 * Atomic test-and-set of bits in a 32-bit word.
 * NOTE(review): exact semantics of test_mask/set_mask and the `wait`
 * flag are defined by the implementation — only the signature is
 * visible here; confirm before relying on details.
 */
extern boolean_t                hw_atomic_test_and_set32(
	uint32_t *target,
	uint32_t test_mask,
	uint32_t set_mask,
	enum memory_order ord,
	boolean_t wait);

/* Same shape as hw_atomic_test_and_set32; see implementation for how
 * the two variants differ. */
extern boolean_t                atomic_test_and_set32(
	uint32_t *target,
	uint32_t test_mask,
	uint32_t set_mask,
	enum memory_order ord,
	boolean_t wait);

/*
 * atomic_exchange_begin32 / atomic_exchange_complete32 /
 * atomic_exchange_abort form an optimistic read-modify-write sequence:
 * begin loads the current value (recording it in *previous), complete
 * attempts to publish newval if the word still equals `previous`, and
 * abort cancels a pending exchange.  NOTE(review): inferred from the
 * names and signatures (LL/SC-style on ARM) — confirm in machine code.
 */
extern void                     atomic_exchange_abort(
	void);

extern boolean_t                atomic_exchange_complete32(
	uint32_t *target,
	uint32_t previous,
	uint32_t newval,
	enum memory_order ord);

extern uint32_t                 atomic_exchange_begin32(
	uint32_t *target,
	uint32_t *previous,
	enum memory_order ord);

#if defined(__arm__) || defined(__arm64__)
/* Load-exclusive / store-exclusive wrappers (ARM LDREX/STREX family). */
uint32_t                        load_exclusive32(
	uint32_t *target,
	enum memory_order ord);
boolean_t                       store_exclusive32(
	uint32_t *target,
	uint32_t value,
	enum memory_order ord);
#endif /* defined(__arm__)||defined(__arm64__) */

/* Release a usimple lock without re-enabling preemption. */
extern void                     usimple_unlock_nopreempt(
	usimple_lock_t);

/* Materialize a concrete spin deadline from a policy. */
extern hw_spin_timeout_t hw_spin_compute_timeout(
	hw_spin_policy_t         policy);

/* Whether the given timeout was computed from PPL context. */
extern bool hw_spin_in_ppl(
	hw_spin_timeout_t       to) __pure2;

/*
 * Called periodically from spin loops: decides whether to keep
 * spinning on `lock` given the policy, deadline and accumulated state
 * (may panic or report on timeout, per policy).
 */
extern bool hw_spin_should_keep_spinning(
	void                   *lock,
	hw_spin_policy_t        policy,
	hw_spin_timeout_t       to,
	hw_spin_state_t        *state);
204 
205 #endif /* MACH_KERNEL_PRIVATE */
206 
/*
 * Startup-time initialization record for a usimple lock: pairs the lock
 * with the argument to forward to usimple_lock_init().  Instances are
 * emitted by SIMPLE_LOCK_DECLARE() and consumed during the LOCKS
 * startup phase.
 */
struct usimple_lock_startup_spec {
	usimple_lock_t  lck;            /* lock to initialize */
	unsigned short  lck_init_arg;   /* forwarded to usimple_lock_init() */
};

/* STARTUP callback: initializes the lock described by @spec. */
extern void                     usimple_lock_startup_init(
	struct usimple_lock_startup_spec *spec);
214 
/*
 * Declare a simple lock as file-scope data and register it for
 * automatic initialization with @arg during the LOCKS startup phase
 * (STARTUP_RANK_FOURTH), via usimple_lock_startup_init().
 */
#define SIMPLE_LOCK_DECLARE(var, arg) \
	decl_simple_lock_data(, var); \
	static __startup_data struct usimple_lock_startup_spec \
	__startup_usimple_lock_spec_ ## var = { &var, arg }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, usimple_lock_startup_init, \
	    &__startup_usimple_lock_spec_ ## var)
221 
/*
 * Wait while *address still reads equal to `current`; returns the first
 * different value observed.  NOTE(review): the waiting strategy
 * (spin vs. WFE) is implementation-defined — only the visible contract
 * is documented here.
 */
extern uint32_t hw_wait_while_equals32(
	uint32_t               *address,
	uint32_t                current);

extern uint64_t hw_wait_while_equals64(
	uint64_t               *address,
	uint64_t                current);

/*
 * Pointer-width dispatch for long-sized words: selects the 32- or
 * 64-bit primitive to match sizeof(long); the static_assert rejects
 * operands whose pointee is not long-sized.
 */
#if __LP64__
#define hw_wait_while_equals_long(ptr, cur) ({ \
	static_assert(sizeof(*(ptr)) == sizeof(long)); \
	(typeof(cur))hw_wait_while_equals64(__DEVOLATILE(uint64_t *, ptr), (uint64_t)(cur)); \
})
#else
#define hw_wait_while_equals_long(ptr, cur) ({ \
	static_assert(sizeof(*(ptr)) == sizeof(long)); \
	(typeof(cur))hw_wait_while_equals32(__DEVOLATILE(uint32_t *, ptr), (uint32_t)(cur)); \
})
#endif
241 
242 
/* Initialize a usimple lock; the meaning of the short argument is
 * defined by the implementation (see usimple_lock_startup_init users). */
extern void                     usimple_lock_init(
	usimple_lock_t,
	unsigned short);

/* Acquire the lock, spinning until available. */
extern void                     usimple_lock(
	usimple_lock_t
	LCK_GRP_ARG(lck_grp_t*));

/* One-shot acquisition attempt; nonzero on success. */
extern unsigned int             usimple_lock_try(
	usimple_lock_t
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

/* Loop on usimple_lock_try() until the lock is acquired. */
extern void             usimple_lock_try_lock_loop(
	usimple_lock_t
	LCK_GRP_ARG(lck_grp_t*));

#if defined(__x86_64__)
/*
 * try-lock loops that remain safe to use while servicing MP signals
 * (x86 only); bounded by an absolute deadline or a duration,
 * respectively.  NOTE(review): __result_use_check on the deadline
 * variant is commented out in the original — presumably some caller
 * legitimately ignores the result.
 */
extern unsigned int     usimple_lock_try_lock_mp_signal_safe_loop_deadline(
	usimple_lock_t,
	uint64_t
	LCK_GRP_ARG(lck_grp_t*)) /* __result_use_check */;

extern unsigned int     usimple_lock_try_lock_mp_signal_safe_loop_duration(
	usimple_lock_t,
	uint64_t
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;
#endif

/* Release the lock. */
extern void                     usimple_unlock(
	usimple_lock_t);
273 
#if !LCK_GRP_USE_ARG
/*
 * Lock-group accounting compiled out: drop the `grp` argument by
 * remapping each call through a same-named function-like macro.
 */
#define usimple_lock(lck, grp) \
	usimple_lock(lck)

#define usimple_lock_try(lck, grp) \
	usimple_lock_try(lck)

#define usimple_lock_try_lock_loop(lck, grp) \
	usimple_lock_try_lock_loop(lck)

#if defined(__x86_64__)
#define usimple_lock_try_lock_mp_signal_safe_loop_deadline(lck, ddl, grp) \
	usimple_lock_try_lock_mp_signal_safe_loop_deadline(lck, ddl)
#define usimple_lock_try_lock_mp_signal_safe_loop_duration(lck, dur, grp) \
	usimple_lock_try_lock_mp_signal_safe_loop_duration(lck, dur)
#endif
#endif /* !LCK_GRP_USE_ARG */
291 
292 
293 /*
294  * If we got to here and we still don't have simple_lock_init
295  * defined, then we must either be outside the osfmk component,
296  * running on a true SMP, or need debug.
297  */
#if !defined(simple_lock_init)
/*
 * Default simple_lock_* veneer over the usimple_lock implementation.
 * machine/simple_lock.h may have supplied its own simple_lock_init, in
 * which case this whole set of defaults is skipped.
 */
#define simple_lock_init(l, t)               usimple_lock_init(l,t)
#define simple_lock(l, grp)                  usimple_lock(l, grp)
#define simple_unlock(l)                     usimple_unlock(l)
#define simple_lock_try(l, grp)              usimple_lock_try(l, grp)
#define simple_lock_try_lock_loop(l, grp)    usimple_lock_try_lock_loop(l, grp)
#define simple_lock_try_lock_mp_signal_safe_loop_deadline(l, ddl, grp) \
	usimple_lock_try_lock_mp_signal_safe_loop_deadline(l, ddl, grp)
#define simple_lock_try_lock_mp_signal_safe_loop_duration(l, dur, grp) \
	usimple_lock_try_lock_mp_signal_safe_loop_duration(l, dur, grp)
#define simple_lock_addr(l)     (&(l))
#endif /* !defined(simple_lock_init) */
310 
311 #ifdef MACH_KERNEL_PRIVATE
312 
/* A bit lock: one bit of a 32-bit word serves as the lock. */
typedef uint32_t hw_lock_bit_t;

/*
 * Predefined spin policies usable with the *_to() acquire variants.
 * NOTE(review): from the names, hw_lock_spin_panic_policy panics on
 * timeout and hw_lock_bit_policy_2s uses a ~2 second bound — confirm
 * against the policy definitions.
 */
#if __arm64__
extern const struct hw_spin_policy hw_lock_bit_policy_2s;
#endif
extern const struct hw_spin_policy hw_lock_spin_policy;
extern const struct hw_spin_policy hw_lock_spin_panic_policy;
#if DEBUG || DEVELOPMENT
extern const struct hw_spin_policy hw_lock_test_give_up_policy;
#endif /* DEBUG || DEVELOPMENT */
323 
/* Acquire bit `bit` of the word as a spin lock, spinning until free. */
extern void     hw_lock_bit(
	hw_lock_bit_t *,
	unsigned int
	LCK_GRP_ARG(lck_grp_t*));

/* As hw_lock_bit(); caller manages preemption. */
extern void     hw_lock_bit_nopreempt(
	hw_lock_bit_t *,
	unsigned int
	LCK_GRP_ARG(lck_grp_t*));


/* One-shot attempt on the bit lock; nonzero on success. */
extern unsigned int hw_lock_bit_try(
	hw_lock_bit_t *,
	unsigned int
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

/* Bounded-spin acquire of the bit lock under the given policy. */
extern unsigned int hw_lock_bit_to(
	hw_lock_bit_t *,
	unsigned int,
	hw_spin_policy_t
	LCK_GRP_ARG(lck_grp_t*)) __result_use_check;

/* Release the bit lock. */
extern void     hw_unlock_bit(
	hw_lock_bit_t *,
	unsigned int);

/* Release the bit lock; caller manages preemption. */
extern void     hw_unlock_bit_nopreempt(
	hw_lock_bit_t *,
	unsigned int);
353 
/*
 * hw_lock_bit_held(l, b): nonzero when bit @b of the 32-bit lock word
 * pointed to by @l is set (i.e. the bit lock is held).
 *
 * The mask uses an unsigned constant: `1 << 31` shifts a set bit into
 * the sign bit of a signed int, which is undefined behavior (C11
 * 6.5.7), while `1u << (b)` is well defined for b in [0, 31].
 */
#define hw_lock_bit_held(l, b) \
	(((*(l)) & (1u << (b))) != 0)
356 
#if !LCK_GRP_USE_ARG
/* Lock-group accounting compiled out: drop the `grp` argument. */
#define hw_lock_bit(lck, bit, grp) \
	hw_lock_bit(lck, bit)

#define hw_lock_bit_nopreempt(lck, bit, grp) \
	hw_lock_bit_nopreempt(lck, bit)


#define hw_lock_bit_try(lck, bit, grp) \
	hw_lock_bit_try(lck, bit)

#define hw_lock_bit_to(lck, bit, spec, grp) \
	hw_lock_bit_to(lck, bit, spec)

#endif /* !LCK_GRP_USE_ARG */
371 #endif /* !LCK_GRP_USE_ARG */
372 #endif  /* MACH_KERNEL_PRIVATE */
373 
374 __END_DECLS
375 
376 #pragma GCC visibility pop
377 
378 #endif /* XNU_KERNEL_PRIVATE */
379 #endif /*!_KERN_SIMPLE_LOCK_H_*/
380 
381 #endif  /* KERNEL_PRIVATE */
382