xref: /xnu-8020.140.41/osfmk/kern/locks.h (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_LOCKS_H_
30 #define _KERN_LOCKS_H_
31 
32 #include <sys/cdefs.h>
33 
34 #ifdef  XNU_KERNEL_PRIVATE
35 #include <kern/startup.h>
36 #include <kern/percpu.h>
37 #endif /* XNU_KERNEL_PRIVATE */
38 
39 __BEGIN_DECLS
40 
41 #include <sys/appleapiopts.h>
42 #include <mach/boolean.h>
43 #include <kern/kern_types.h>
44 #include <kern/lock_group.h>
45 #include <machine/locks.h>
46 #ifdef KERNEL_PRIVATE
47 #include <kern/ticket_lock.h>
48 #endif
49 #include <kern/lock_types.h>
50 #include <kern/lock_attr.h>
51 #include <kern/lock_rw.h>
52 
53 #define decl_lck_spin_data(class, name)     class lck_spin_t name  /* declare a lck_spin_t variable with the given storage class */
54 
55 extern lck_spin_t      *lck_spin_alloc_init(
56 	lck_grp_t               *grp,
57 	lck_attr_t              *attr);
58 
59 extern void             lck_spin_init(
60 	lck_spin_t              *lck,
61 	lck_grp_t               *grp,
62 	lck_attr_t              *attr);
63 
64 extern void             lck_spin_lock(
65 	lck_spin_t              *lck);
66 
67 extern void             lck_spin_lock_grp(
68 	lck_spin_t              *lck,
69 	lck_grp_t               *grp);
70 
71 extern void             lck_spin_unlock(
72 	lck_spin_t              *lck);
73 
74 extern void             lck_spin_destroy(
75 	lck_spin_t              *lck,
76 	lck_grp_t               *grp);
77 
78 extern void             lck_spin_free(
79 	lck_spin_t              *lck,
80 	lck_grp_t               *grp);
81 
82 extern wait_result_t    lck_spin_sleep(
83 	lck_spin_t              *lck,
84 	lck_sleep_action_t      lck_sleep_action,
85 	event_t                 event,
86 	wait_interrupt_t        interruptible);
87 
88 extern wait_result_t    lck_spin_sleep_grp(
89 	lck_spin_t              *lck,
90 	lck_sleep_action_t      lck_sleep_action,
91 	event_t                 event,
92 	wait_interrupt_t        interruptible,
93 	lck_grp_t               *grp);
94 
95 extern wait_result_t    lck_spin_sleep_deadline(
96 	lck_spin_t              *lck,
97 	lck_sleep_action_t      lck_sleep_action,
98 	event_t                 event,
99 	wait_interrupt_t        interruptible,
100 	uint64_t                deadline);
101 
102 #ifdef  KERNEL_PRIVATE
103 
104 extern void             lck_spin_lock_nopreempt(
105 	lck_spin_t              *lck);
106 
107 extern void             lck_spin_lock_nopreempt_grp(
108 	lck_spin_t              *lck, lck_grp_t *grp);
109 
110 extern void             lck_spin_unlock_nopreempt(
111 	lck_spin_t              *lck);
112 
113 extern boolean_t        lck_spin_try_lock_grp(
114 	lck_spin_t              *lck,
115 	lck_grp_t               *grp);
116 
117 extern boolean_t        lck_spin_try_lock(
118 	lck_spin_t              *lck);
119 
120 extern boolean_t        lck_spin_try_lock_nopreempt(
121 	lck_spin_t              *lck);
122 
123 extern boolean_t        lck_spin_try_lock_nopreempt_grp(
124 	lck_spin_t              *lck,
125 	lck_grp_t               *grp);
126 
127 /* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */
128 extern boolean_t        kdp_lck_spin_is_acquired(
129 	lck_spin_t              *lck);
130 
131 struct _lck_mtx_ext_;
132 extern void lck_mtx_init_ext(
133 	lck_mtx_t               *lck,
134 	struct _lck_mtx_ext_    *lck_ext,
135 	lck_grp_t               *grp,
136 	lck_attr_t              *attr);
137 
138 #endif
139 
140 #define decl_lck_mtx_data(class, name)     class lck_mtx_t name  /* declare a lck_mtx_t variable with the given storage class */
141 
142 extern lck_mtx_t        *lck_mtx_alloc_init(
143 	lck_grp_t               *grp,
144 	lck_attr_t              *attr);
145 
146 extern void             lck_mtx_init(
147 	lck_mtx_t               *lck,
148 	lck_grp_t               *grp,
149 	lck_attr_t              *attr);
150 extern void             lck_mtx_lock(
151 	lck_mtx_t               *lck);
152 
153 extern void             lck_mtx_unlock(
154 	lck_mtx_t               *lck);
155 
156 extern void             lck_mtx_destroy(
157 	lck_mtx_t               *lck,
158 	lck_grp_t               *grp);
159 
160 extern void             lck_mtx_free(
161 	lck_mtx_t               *lck,
162 	lck_grp_t               *grp);
163 
164 extern wait_result_t    lck_mtx_sleep(
165 	lck_mtx_t               *lck,
166 	lck_sleep_action_t      lck_sleep_action,
167 	event_t                 event,
168 	wait_interrupt_t        interruptible);
169 
170 extern wait_result_t    lck_mtx_sleep_deadline(
171 	lck_mtx_t               *lck,
172 	lck_sleep_action_t      lck_sleep_action,
173 	event_t                 event,
174 	wait_interrupt_t        interruptible,
175 	uint64_t                deadline);
176 
177 #ifdef KERNEL_PRIVATE
178 /*
179  * Name: lck_spin_sleep_with_inheritor
180  *
181  * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
182  *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
183  *              be directed to the inheritor specified.
184  *              An interruptible mode and deadline can be specified to return earlier from the wait.
185  *
186  * Args:
187  *   Arg1: lck_spin_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
188  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
189  *   Arg3: event to wait on.
190  *   Arg4: thread to propagate the event push to.
191  *   Arg5: interruptible flag for wait.
192  *   Arg6: deadline for wait.
193  *
194  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
195  *             Lock will be dropped while waiting.
196  *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
197  *             wakeup for the event is called.
198  *
199  * Returns: result of the wait.
200  */
201 extern wait_result_t lck_spin_sleep_with_inheritor(
202 	lck_spin_t              *lock,
203 	lck_sleep_action_t      lck_sleep_action,
204 	event_t                 event,
205 	thread_t                inheritor,
206 	wait_interrupt_t        interruptible,
207 	uint64_t                deadline);
208 
209 /*
210  * Name: lck_ticket_sleep_with_inheritor
211  *
212  * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
213  *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
214  *              be directed to the inheritor specified.
215  *              An interruptible mode and deadline can be specified to return earlier from the wait.
216  *
217  * Args:
218  *   Arg1: lck_ticket_t lock used to protect the sleep.  The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
219  *   Arg2: lck_grp_t associated with the lock.
220  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
221  *   Arg4: event to wait on.
222  *   Arg5: thread to propagate the event push to.
223  *   Arg6: interruptible flag for wait.
224  *   Arg7: deadline for wait.
225  *
226  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
227  *             Lock will be dropped while waiting.
228  *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
229  *             wakeup for the event is called.
230  *
231  * Returns: result of the wait.
232  */
233 extern wait_result_t lck_ticket_sleep_with_inheritor(
234 	lck_ticket_t            *lock,
235 	lck_grp_t               *grp,
236 	lck_sleep_action_t      lck_sleep_action,
237 	event_t                 event,
238 	thread_t                inheritor,
239 	wait_interrupt_t        interruptible,
240 	uint64_t                deadline);
241 
242 /*
243  * Name: lck_mtx_sleep_with_inheritor
244  *
245  * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
246  *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
247  *              be directed to the inheritor specified.
248  *              An interruptible mode and deadline can be specified to return earlier from the wait.
249  *
250  * Args:
251  *   Arg1: lck_mtx_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
252  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
253  *   Arg3: event to wait on.
254  *   Arg4: thread to propagate the event push to.
255  *   Arg5: interruptible flag for wait.
256  *   Arg6: deadline for wait.
257  *
258  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
259  *             Lock will be dropped while waiting.
260  *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
261  *             wakeup for the event is called.
262  *
263  * Returns: result of the wait.
264  */
265 extern wait_result_t lck_mtx_sleep_with_inheritor(
266 	lck_mtx_t               *lock,
267 	lck_sleep_action_t      lck_sleep_action,
268 	event_t                 event,
269 	thread_t                inheritor,
270 	wait_interrupt_t        interruptible,
271 	uint64_t                deadline);
272 
273 /*
274  * Name: lck_rw_sleep_with_inheritor
275  *
276  * Description: deschedule the current thread and wait on the waitq associated with event to be woken up.
277  *              While waiting, the sched priority of the waiting thread will contribute to the push of the event that will
278  *              be directed to the inheritor specified.
279  *              An interruptible mode and deadline can be specified to return earlier from the wait.
280  *
281  * Args:
282  *   Arg1: lck_rw_t lock used to protect the sleep. The lock will be dropped while sleeping and reacquired before returning according to the sleep action specified.
283  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE.
284  *   Arg3: event to wait on.
285  *   Arg4: thread to propagate the event push to.
286  *   Arg5: interruptible flag for wait.
287  *   Arg6: deadline for wait.
288  *
289  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
290  *             Lock will be dropped while waiting.
291  *             The inheritor specified cannot run in user space until another inheritor is specified for the event or a
292  *             wakeup for the event is called.
293  *
294  * Returns: result of the wait.
295  */
296 extern wait_result_t lck_rw_sleep_with_inheritor(
297 	lck_rw_t                *lock,
298 	lck_sleep_action_t      lck_sleep_action,
299 	event_t                 event,
300 	thread_t                inheritor,
301 	wait_interrupt_t        interruptible,
302 	uint64_t                deadline);
303 
304 /*
305  * Name: wakeup_one_with_inheritor
306  *
307  * Description: wake up one waiter for event if any. The thread woken up will be the one with the higher sched priority waiting on event.
308  *              The push for the event will be transferred from the last inheritor to the woken up thread.
309  *
310  * Args:
311  *   Arg1: event to wake from.
312  *   Arg2: wait result to pass to the woken up thread.
313  *   Arg3: actions to take on wakeup (lck_wake_action_t).
313  *   Arg4: pointer for storing the thread woken up.
314  *
315  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
316  *
317  * Conditions: The newly woken up inheritor cannot run in user space until another inheritor is specified for the event or a
318  *             wakeup for the event is called.
319  *             A reference for the woken-up thread is acquired.
320  *             NOTE: this cannot be called from interrupt context.
321  */
322 extern kern_return_t wakeup_one_with_inheritor(
323 	event_t                 event,
324 	wait_result_t           result,
325 	lck_wake_action_t       action,
326 	thread_t                *thread_wokenup);
327 
328 /*
329  * Name: wakeup_all_with_inheritor
330  *
331  * Description: wake up all waiters waiting for event. The old inheritor will lose the push.
332  *
333  * Args:
334  *   Arg1: event to wake from.
335  *   Arg2: wait result to pass to the woken up threads.
336  *
337  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
338  *
339  * Conditions: NOTE: this cannot be called from interrupt context.
340  */
341 extern kern_return_t wakeup_all_with_inheritor(
342 	event_t                 event,
343 	wait_result_t           result);
344 
345 /*
346  * Name: change_sleep_inheritor
347  *
348  * Description: Redirect the push of the waiting threads of event to the new inheritor specified.
349  *
350  * Args:
351  *   Arg1: event to redirect the push.
352  *   Arg2: new inheritor for event.
353  *
354  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
355  *
356  * Conditions: In case of success, the new inheritor cannot run in user space until another inheritor is specified for the event or a
357  *             wakeup for the event is called.
358  *             NOTE: this cannot be called from interrupt context.
359  */
360 extern kern_return_t change_sleep_inheritor(
361 	event_t                 event,
362 	thread_t                inheritor);
363 
364 /*
365  * gate structure
366  */
367 #if XNU_KERNEL_PRIVATE
368 typedef struct gate {
369 	uintptr_t         gt_data;                // thread holder, interlock bit and waiter bit
370 	struct turnstile *gt_turnstile;           // turnstile, protected by the interlock bit
371 	union {
372 		struct {
373 			uint32_t  gt_refs:16,             // refs using the gate, protected by interlock bit
374 			    gt_alloc:1,                   // set if the gate was allocated with gate_alloc_init
375 			    gt_type:2,                    // type bits for validity
376 			    gt_flags_pad:13;              // unused padding bits
377 		};
378 		uint32_t  gt_flags;                   // all of the above flag bits viewed as one word
379 	};
380 } gate_t;
381 #else
382 typedef struct gate {
383 	uintptr_t         opaque1;                // opaque storage for gt_data
384 	uintptr_t         opaque2;                // opaque storage for gt_turnstile
385 	uint32_t          opaque3;                // opaque storage for gt_flags
386 } gate_t;
387 #endif /* XNU_KERNEL_PRIVATE */
388 
389 /*
390  * Possible gate_wait_result_t values.
391  */
392 __options_decl(gate_wait_result_t, unsigned int, {
393 	GATE_HANDOFF      = 0x00,         /* gate was handed off to the current thread */
394 	GATE_OPENED       = 0x01,         /* gate was opened by its holder */
395 	GATE_TIMED_OUT    = 0x02,         /* wait timed out before a wakeup */
396 	GATE_INTERRUPTED  = 0x03,         /* wait was interrupted */
397 });
398 
399 /*
400  * Gate flags used by gate_assert
401  */
402 __options_decl(gate_assert_flags_t, unsigned int, {
403 	GATE_ASSERT_CLOSED = 0x00,         /* asserts the gate is currently closed (by any thread) */
404 	GATE_ASSERT_OPEN   = 0x01,         /* asserts the gate is currently open */
405 	GATE_ASSERT_HELD   = 0x02,         /* asserts the gate is closed and held by current_thread() */
406 });
407 
408 /*
409  * Gate flags used by gate_handoff
410  */
411 __options_decl(gate_handoff_flags_t, unsigned int, {
412 	GATE_HANDOFF_DEFAULT            = 0x00,         /* a waiter must exist to hand off the gate (else KERN_NOT_WAITING) */
413 	GATE_HANDOFF_OPEN_IF_NO_WAITERS = 0x1,         /* behave like a gate_open() if there are no waiters */
414 });
415 
416 /*
417  * Name: decl_lck_rw_gate_data
418  *
419  * Description: declares a gate variable with specified storage class.
420  *              The gate itself will be stored in this variable and it is the caller's responsibility
421  *              to ensure that this variable's memory is going to be accessible by all threads that will use
422  *              the gate.
423  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
424  *              be used in every thread.
425  *
426  *              The variable needs to be initialized once with lck_rw_gate_init() and destroyed once with
427  *              lck_rw_gate_destroy() when not needed anymore.
428  *
429  *              The gate will be used in conjunction with a lck_rw_t.
430  *
431  * Args:
432  *   Arg1: storage class.
433  *   Arg2: variable name.
434  */
435 #define decl_lck_rw_gate_data(class, name)                              class gate_t name
436 
437 /*
438  * Name: lck_rw_gate_init
439  *
440  * Description: initializes a variable declared with decl_lck_rw_gate_data.
441  *
442  * Args:
443  *   Arg1: lck_rw_t lock used to protect the gate.
444  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
445  */
446 extern void lck_rw_gate_init(lck_rw_t *lock, gate_t *gate);
447 
448 /*
449  * Name: lck_rw_gate_destroy
450  *
451  * Description: destroys a variable previously initialized
452  *              with lck_rw_gate_init().
453  *
454  * Args:
455  *   Arg1: lck_rw_t lock used to protect the gate.
456  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
457  */
458 extern void lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate);
459 
460 /*
461  * Name: lck_rw_gate_alloc_init
462  *
463  * Description: allocates and initializes a gate_t.
464  *
465  * Args:
466  *   Arg1: lck_rw_t lock used to protect the gate.
467  *
468  * Returns:
469  *         gate_t allocated.
470  */
471 extern gate_t* lck_rw_gate_alloc_init(lck_rw_t *lock);
472 
473 /*
474  * Name: lck_rw_gate_free
475  *
476  * Description: destroys and tries to free a gate previously allocated
477  *              with lck_rw_gate_alloc_init().
478  *              The gate free might be delegated to the last thread returning
479  *              from the gate_wait().
480  *
481  * Args:
482  *   Arg1: lck_rw_t lock used to protect the gate.
483  *   Arg2: pointer to the gate obtained with lck_rw_gate_alloc_init().
484  */
485 extern void lck_rw_gate_free(lck_rw_t *lock, gate_t *gate);
486 
487 /*
488  * Name: lck_rw_gate_try_close
489  *
490  * Description: Tries to close the gate.
491  *              In case of success the current thread will be set as
492  *              the holder of the gate.
493  *
494  * Args:
495  *   Arg1: lck_rw_t lock used to protect the gate.
496  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
497  *
498  * Conditions: Lock must be held. Returns with the lock held.
499  *
500  * Returns:
501  *          KERN_SUCCESS in case the gate was successfully closed. The current thread is the new holder
502  *          of the gate.
503  *          A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
504  *          to wake up possible waiters on the gate before returning to userspace.
505  *          If the intent is to conditionally probe the gate before waiting, the lock must not be dropped
506  *          between the calls to lck_rw_gate_try_close() and lck_rw_gate_wait().
507  *
508  *          KERN_FAILURE in case the gate was already closed. Will panic if the current thread was already the holder of the gate.
509  *          lck_rw_gate_wait() should be called instead if the intent is to unconditionally wait on this gate.
510  *          The calls to lck_rw_gate_try_close() and lck_rw_gate_wait() should
511  *          be done without dropping the lock that is protecting the gate in between.
512  */
513 extern kern_return_t lck_rw_gate_try_close(lck_rw_t *lock, gate_t *gate);
514 
515 /*
516  * Name: lck_rw_gate_close
517  *
518  * Description: Closes the gate. The current thread will be set as
519  *              the holder of the gate. Will panic if the gate is already closed.
520  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
521  *              to wake up possible waiters on the gate before returning to userspace.
522  *
523  * Args:
524  *   Arg1: lck_rw_t lock used to protect the gate.
525  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
526  *
527  * Conditions: Lock must be held. Returns with the lock held.
528  *             The gate must be open.
529  *
530  */
531 extern void lck_rw_gate_close(lck_rw_t *lock, gate_t *gate);
532 
533 
534 /*
535  * Name: lck_rw_gate_open
536  *
537  * Description: Opens the gate and wakes up possible waiters.
538  *
539  * Args:
540  *   Arg1: lck_rw_t lock used to protect the gate.
541  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
542  *
543  * Conditions: Lock must be held. Returns with the lock held.
544  *             The current thread must be the holder of the gate.
545  *
546  */
547 extern void lck_rw_gate_open(lck_rw_t *lock, gate_t *gate);
548 
549 /*
550  * Name: lck_rw_gate_handoff
551  *
552  * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
553  *              priority will be selected as the new holder of the gate, and woken up,
554  *              with the gate remaining in the closed state throughout.
555  *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
556  *              will be returned.
557  *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
558  *              case no waiters were found.
559  *
560  *
561  * Args:
562  *   Arg1: lck_rw_t lock used to protect the gate.
563  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
564  *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
565  *
566  * Conditions: Lock must be held. Returns with the lock held.
567  *             The current thread must be the holder of the gate.
568  *
569  * Returns:
570  *          KERN_SUCCESS in case one of the waiters became the new holder.
571  *          KERN_NOT_WAITING in case there were no waiters.
572  *
573  */
574 extern kern_return_t lck_rw_gate_handoff(lck_rw_t *lock, gate_t *gate, gate_handoff_flags_t flags);
575 
576 /*
577  * Name: lck_rw_gate_steal
578  *
579  * Description: Steals the ownership of the gate. It sets the current thread as the
580  *              new holder of the gate.
581  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
582  *              to wake up possible waiters on the gate before returning to userspace.
583  *              NOTE: the previous holder should not call lck_rw_gate_open() or lck_rw_gate_handoff()
584  *              anymore.
585  *
586  *
587  * Args:
588  *   Arg1: lck_rw_t lock used to protect the gate.
589  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
590  *
591  * Conditions: Lock must be held. Returns with the lock held.
592  *             The gate must be closed and the current thread must not already be the holder.
593  *
594  */
595 extern void lck_rw_gate_steal(lck_rw_t *lock, gate_t *gate);
596 
597 /*
598  * Name: lck_rw_gate_wait
599  *
600  * Description: Waits for the current thread to become the holder of the gate or for the
601  *              gate to become open. An interruptible mode and deadline can be specified
602  *              to return earlier from the wait.
603  *
604  * Args:
605  *   Arg1: lck_rw_t lock used to protect the gate.
606  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
607  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, LCK_SLEEP_UNLOCK.
608  *   Arg4: interruptible flag for wait.
609  *   Arg5: deadline for wait.
610  *
611  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
612  *             Lock will be dropped while waiting.
613  *             The gate must be closed.
614  *
615  * Returns: Reason why the thread was woken up.
616  *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
617  *                         A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on.
618  *                         to wake up possible waiters on the gate before returning to userspace.
619  *          GATE_OPENED - the gate was opened by the holder.
620  *          GATE_TIMED_OUT - the thread was woken up by a timeout.
621  *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
622  */
623 extern gate_wait_result_t lck_rw_gate_wait(lck_rw_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline);
624 
625 /*
626  * Name: lck_rw_gate_assert
627  *
628  * Description: asserts that the gate is in the specified state.
629  *
630  * Args:
631  *   Arg1: lck_rw_t lock used to protect the gate.
632  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
633  *   Arg3: flags to specified assert type.
634  *         GATE_ASSERT_CLOSED - the gate is currently closed
635  *         GATE_ASSERT_OPEN - the gate is currently opened
636  *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
637  */
638 extern void lck_rw_gate_assert(lck_rw_t *lock, gate_t *gate, gate_assert_flags_t flags);
639 
640 /*
641  * Name: decl_lck_mtx_gate_data
642  *
643  * Description: declares a gate variable with specified storage class.
644  *              The gate itself will be stored in this variable and it is the caller's responsibility
645  *              to ensure that this variable's memory is going to be accessible by all threads that will use
646  *              the gate.
647  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
648  *              be used in every thread.
649  *
650  *              The variable needs to be initialized once with lck_mtx_gate_init() and destroyed once with
651  *              lck_mtx_gate_destroy() when not needed anymore.
652  *
653  *              The gate will be used in conjunction with a lck_mtx_t.
654  *
655  * Args:
656  *   Arg1: storage class.
657  *   Arg2: variable name.
658  */
659 #define decl_lck_mtx_gate_data(class, name)                             class gate_t name
660 
661 /*
662  * Name: lck_mtx_gate_init
663  *
664  * Description: initializes a variable declared with decl_lck_mtx_gate_data.
665  *
666  * Args:
667  *   Arg1: lck_mtx_t lock used to protect the gate.
668  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
669  */
670 extern void lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate);
671 
672 /*
673  * Name: lck_mtx_gate_destroy
674  *
675  * Description: destroys a variable previously initialized
676  *              with lck_mtx_gate_init().
677  *
678  * Args:
679  *   Arg1: lck_mtx_t lock used to protect the gate.
680  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
681  */
682 extern void lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate);
683 
684 /*
685  * Name: lck_mtx_gate_alloc_init
686  *
687  * Description: allocates and initializes a gate_t.
688  *
689  * Args:
690  *   Arg1: lck_mtx_t lock used to protect the gate.
691  *
692  * Returns:
693  *         gate_t allocated.
694  */
695 extern gate_t* lck_mtx_gate_alloc_init(lck_mtx_t *lock);
696 
697 /*
698  * Name: lck_mtx_gate_free
699  *
700  * Description: destroys and tries to free a gate previously allocated
701  *              with lck_mtx_gate_alloc_init().
702  *              The gate free might be delegated to the last thread returning
703  *              from the gate_wait().
704  *
705  * Args:
706  *   Arg1: lck_mtx_t lock used to protect the gate.
707  *   Arg2: pointer to the gate obtained with lck_mtx_gate_alloc_init().
708  */
709 extern void lck_mtx_gate_free(lck_mtx_t *lock, gate_t *gate);
710 
711 /*
712  * Name: lck_mtx_gate_try_close
713  *
714  * Description: Tries to close the gate.
715  *              In case of success the current thread will be set as
716  *              the holder of the gate.
717  *
718  * Args:
719  *   Arg1: lck_mtx_t lock used to protect the gate.
720  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
721  *
722  * Conditions: Lock must be held. Returns with the lock held.
723  *
724  * Returns:
725  *          KERN_SUCCESS in case the gate was successfully closed. The current thread is the new holder
726  *          of the gate.
727  *          A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
728  *          to wake up possible waiters on the gate before returning to userspace.
729  *          If the intent is to conditionally probe the gate before waiting, the lock must not be dropped
730  *          between the calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait().
731  *
732  *          KERN_FAILURE in case the gate was already closed. Will panic if the current thread was already the holder of the gate.
733  *          lck_mtx_gate_wait() should be called instead if the intent is to unconditionally wait on this gate.
734  *          The calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait() should
735  *          be done without dropping the lock that is protecting the gate in between.
736  */
737 extern kern_return_t lck_mtx_gate_try_close(lck_mtx_t *lock, gate_t *gate);
738 
739 /*
740  * Name: lck_mtx_gate_close
741  *
742  * Description: Closes the gate. The current thread will be set as
743  *              the holder of the gate. Will panic if the gate is already closed.
744  *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
745  *              to wake up possible waiters on the gate before returning to userspace.
746  *
747  * Args:
748  *   Arg1: lck_mtx_t lock used to protect the gate.
749  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
750  *
751  * Conditions: Lock must be held. Returns with the lock held.
752  *             The gate must be open.
753  *
754  */
755 extern void lck_mtx_gate_close(lck_mtx_t *lock, gate_t *gate);
756 
757 /*
758  * Name: lck_mtx_gate_open
759  *
760  * Description: Opens the gate and wakes up possible waiters.
761  *
762  * Args:
763  *   Arg1: lck_mtx_t lock used to protect the gate.
764  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
765  *
766  * Conditions: Lock must be held. Returns with the lock held.
767  *             The current thread must be the holder of the gate.
768  *
769  */
770 extern void lck_mtx_gate_open(lck_mtx_t *lock, gate_t *gate);
771 
/*
 * Name: lck_mtx_gate_handoff
 *
 * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
 *              priority will be selected as the new holder of the gate, and woken up,
 *              with the gate remaining in the closed state throughout.
 *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
 *              will be returned.
 *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
 *              case no waiters were found.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The current thread must be the holder of the gate.
 *
 * Returns:
 *          KERN_SUCCESS in case one of the waiters became the new holder.
 *          KERN_NOT_WAITING in case there were no waiters.
 *
 */
extern kern_return_t lck_mtx_gate_handoff(lck_mtx_t *lock, gate_t *gate, gate_handoff_flags_t flags);
798 
/*
 * Name: lck_mtx_gate_steal
 *
 * Description: Steals the ownership of the gate. It sets the current thread as the
 *              new holder of the gate.
 *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *              to wake up possible waiters on the gate before returning to userspace.
 *              NOTE: the previous holder should not call lck_mtx_gate_open() or lck_mtx_gate_handoff()
 *              anymore.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *
 * Conditions: Lock must be held. Returns with the lock held.
 *             The gate must be closed and the current thread must not already be the holder.
 *
 */
extern void lck_mtx_gate_steal(lck_mtx_t *lock, gate_t *gate);
819 
/*
 * Name: lck_mtx_gate_wait
 *
 * Description: Waits for the current thread to become the holder of the gate or for the
 *              gate to become open. An interruptible mode and deadline can be specified
 *              to return earlier from the wait.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
 *   Arg4: interruptible flag for wait.
 *   Arg5: deadline
 *
 * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
 *             Lock will be dropped while waiting.
 *             The gate must be closed.
 *
 * Returns: Reason why the thread was woken up.
 *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
 *                         A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
 *                         to wake up possible waiters on the gate before returning to userspace.
 *          GATE_OPENED - the gate was opened by the holder.
 *          GATE_TIMED_OUT - the thread was woken up by a timeout.
 *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
 */
extern gate_wait_result_t lck_mtx_gate_wait(lck_mtx_t *lock, gate_t *gate, lck_sleep_action_t lck_sleep_action, wait_interrupt_t interruptible, uint64_t deadline);
847 
/*
 * Name: lck_mtx_gate_assert
 *
 * Description: asserts that the gate is in the specified state.
 *
 * Args:
 *   Arg1: lck_mtx_t lock used to protect the gate.
 *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: flags to specify the assert type.
 *         GATE_ASSERT_CLOSED - the gate is currently closed
 *         GATE_ASSERT_OPEN - the gate is currently open
 *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
 */
extern void lck_mtx_gate_assert(lck_mtx_t *lock, gate_t *gate, gate_assert_flags_t flags);
862 
/*
 * Tries to acquire the mutex without blocking.
 * Returns TRUE if the lock was acquired, FALSE otherwise.
 */
extern boolean_t        lck_mtx_try_lock(
	lck_mtx_t               *lck);

/*
 * Short delay used to back off between lock acquisition attempts.
 * NOTE(review): exact backoff policy is defined in the implementation.
 */
extern void             mutex_pause(uint32_t);

/*
 * Briefly yields the mutex so contending threads can make progress —
 * presumably drops and re-takes the lock; confirm in the implementation.
 */
extern void             lck_mtx_yield(
	lck_mtx_t               *lck);

/*
 * Tries to acquire the mutex in spin mode without blocking.
 * Returns TRUE on success.
 */
extern boolean_t        lck_mtx_try_lock_spin(
	lck_mtx_t               *lck);

/* Acquires the mutex in spin mode (busy-waits instead of blocking). */
extern void             lck_mtx_lock_spin(
	lck_mtx_t               *lck);

/* Debugger (KDP) safe check of whether the mutex is currently held. */
extern boolean_t        kdp_lck_mtx_lock_spin_is_acquired(
	lck_mtx_t               *lck);

/* Converts a mutex held in spin mode into a regular (blocking) hold. */
extern void             lck_mtx_convert_spin(
	lck_mtx_t               *lck);

/*
 * Spin-mode acquisition unconditionally — presumably regardless of the
 * lock's attributes; TODO confirm against the implementation.
 */
extern void             lck_mtx_lock_spin_always(
	lck_mtx_t               *lck);

/* Non-blocking variant of lck_mtx_lock_spin_always(); returns TRUE on success. */
extern boolean_t        lck_mtx_try_lock_spin_always(
	lck_mtx_t               *lck);

/* Matching unlock for the *_always acquisition variants (plain unlock). */
#define lck_mtx_unlock_always(l)        lck_mtx_unlock(l)

/*
 * Asserts that the spin lock is in the state given by `type`
 * (LCK_ASSERT_OWNED or LCK_ASSERT_NOTOWNED).
 */
extern void             lck_spin_assert(
	lck_spin_t              *lck,
	unsigned                int    type);
894 
895 #endif  /* KERNEL_PRIVATE */
896 
/*
 * Asserts that the mutex is in the state given by `type`
 * (LCK_MTX_ASSERT_OWNED or LCK_MTX_ASSERT_NOTOWNED).
 */
extern void             lck_mtx_assert(
	lck_mtx_t               *lck,
	unsigned                int    type);
900 
/*
 * LCK_MTX_ASSERT / LCK_SPIN_ASSERT: ownership assertions that are
 * compiled in only on MACH_ASSERT kernels and expand to nothing otherwise.
 */
#if MACH_ASSERT
#define LCK_MTX_ASSERT(lck, type) lck_mtx_assert((lck),(type))
#define LCK_SPIN_ASSERT(lck, type) lck_spin_assert((lck),(type))
#else /* MACH_ASSERT */
#define LCK_MTX_ASSERT(lck, type)
#define LCK_SPIN_ASSERT(lck, type)
#endif /* MACH_ASSERT */

/* *_DEBUG variants: active only on DEBUG kernels. */
#if DEBUG
#define LCK_MTX_ASSERT_DEBUG(lck, type) lck_mtx_assert((lck),(type))
#define LCK_SPIN_ASSERT_DEBUG(lck, type) lck_spin_assert((lck),(type))
#else /* DEBUG */
#define LCK_MTX_ASSERT_DEBUG(lck, type)
#define LCK_SPIN_ASSERT_DEBUG(lck, type)
#endif /* DEBUG */

/* Assertion types accepted by lck_mtx_assert() / lck_spin_assert(). */
#define LCK_ASSERT_OWNED                1
#define LCK_ASSERT_NOTOWNED             2

#define LCK_MTX_ASSERT_OWNED    LCK_ASSERT_OWNED
#define LCK_MTX_ASSERT_NOTOWNED LCK_ASSERT_NOTOWNED
922 
923 #ifdef  MACH_KERNEL_PRIVATE
924 
/*
 * Spinlock timeout diagnostic record (one per CPU, see PERCPU_DECL below).
 * Filled in via lck_spinlock_timeout_hit() when a spinlock acquisition
 * exceeds its timeout, so the panic/debugger path can report the lock
 * and its apparent owner.
 */
typedef struct lck_spinlock_to_info {
	void     *lock;                  /* the lock being spun on */
#if DEBUG || DEVELOPMENT
	uintptr_t owner_thread_orig;     /* owner recorded when the spin started */
#endif /* DEBUG || DEVELOPMENT */
	uintptr_t owner_thread_cur;      /* owner observed when the timeout hit */
	int       owner_cpu;             /* CPU of the owning thread */
	uint32_t  extra;                 /* lock-type specific detail -- TODO confirm semantics */
} *lck_spinlock_to_info_t;

/* Non-NULL while a spinlock timeout is being reported -- NOTE(review): confirm. */
extern volatile lck_spinlock_to_info_t lck_spinlock_timeout_in_progress;
PERCPU_DECL(struct lck_spinlock_to_info, lck_spinlock_to_info);
937 
/* Records the owner observed at the start of a spin, for later timeout reporting. */
extern void             lck_spinlock_timeout_set_orig_owner(
	uintptr_t owner);

/*
 * Called when a spinlock wait exceeds its timeout: captures `lck` and
 * `owner` into the per-CPU lck_spinlock_to_info record and returns it.
 */
extern lck_spinlock_to_info_t lck_spinlock_timeout_hit(
	void     *lck,
	uintptr_t owner);
944 
struct turnstile;

/*
 * Blocks the current thread waiting for `lck`, currently owned by `holder`.
 * `ts` carries the turnstile used across the wait -- NOTE(review): the
 * exact turnstile hand-off protocol is defined in the mutex implementation.
 */
extern void             lck_mtx_lock_wait(
	lck_mtx_t               *lck,
	thread_t                holder,
	struct turnstile        **ts);

/* Completes acquisition of a contended mutex using turnstile `ts`. */
extern int              lck_mtx_lock_acquire(
	lck_mtx_t               *lck,
	struct turnstile        *ts);

/* Wakes a waiter of `lck` on unlock by `holder`; boolean_t result -- TODO confirm meaning. */
extern  boolean_t       lck_mtx_unlock_wakeup(
	lck_mtx_t               *lck,
	thread_t                holder);

/* Releases the mutex interlock ("ilk"). */
extern boolean_t        lck_mtx_ilk_unlock(
	lck_mtx_t               *lck);

/* Tries to take the mutex interlock without waiting; returns TRUE on success. */
extern boolean_t        lck_mtx_ilk_try_lock(
	lck_mtx_t               *lck);

/* Adjusts `thread`'s priority to `priority` as part of a mutex wakeup. */
extern void lck_mtx_wakeup_adjust_pri(thread_t thread, integer_t priority);
966 
967 #endif /* MACH_KERNEL_PRIVATE */
968 #if  XNU_KERNEL_PRIVATE
969 
/* Returns a kdebug-safe (unslid) representation of `object` for tracing. */
uintptr_t unslide_for_kdebug(void* object);

/*
 * Startup argument records consumed by the lock auto-initialization
 * machinery (see the LCK_*_DECLARE macros below): each record tells the
 * matching *_startup_init() function which lock to initialize and with
 * which group/attributes.
 */
struct lck_attr_startup_spec {
	lck_attr_t              *lck_attr;
	uint32_t                lck_attr_set_flags;     /* attribute flags to set */
	uint32_t                lck_attr_clear_flags;   /* attribute flags to clear */
};

struct lck_spin_startup_spec {
	lck_spin_t              *lck;
	lck_grp_t               *lck_grp;
	lck_attr_t              *lck_attr;
};

struct lck_mtx_startup_spec {
	lck_mtx_t               *lck;
	struct _lck_mtx_ext_    *lck_ext;       /* extended storage, used by early mutexes */
	lck_grp_t               *lck_grp;
	lck_attr_t              *lck_attr;
};

/* STARTUP_ARG callbacks that perform the deferred lock initialization. */
extern void             lck_attr_startup_init(
	struct lck_attr_startup_spec *spec);

extern void             lck_spin_startup_init(
	struct lck_spin_startup_spec *spec);

extern void             lck_mtx_startup_init(
	struct lck_mtx_startup_spec *spec);
999 
/*
 * Auto-initializing locks declarations
 * ------------------------------------
 *
 * Unless you need to configure your locks in very specific ways,
 * there is no point creating explicit lock attributes. For most
 * static locks, these declaration macros can be used:
 *
 * - LCK_SPIN_DECLARE for spinlocks,
 * - LCK_MTX_EARLY_DECLARE for mutexes initialized before memory
 *   allocations are possible,
 * - LCK_MTX_DECLARE for mutexes,
 *
 * For cases when some particular attributes need to be used,
 * these come in *_ATTR variants that take a variable declared with
 * LCK_ATTR_DECLARE as an argument.
 */

/* Declares a lck_attr_t initialized with the given set/clear flags at LOCKS_EARLY time. */
#define LCK_ATTR_DECLARE(var, set_flags, clear_flags) \
	SECURITY_READ_ONLY_LATE(lck_attr_t) var; \
	static __startup_data struct lck_attr_startup_spec \
	__startup_lck_attr_spec_ ## var = { &var, set_flags, clear_flags }; \
	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_SECOND, lck_attr_startup_init, \
	    &__startup_lck_attr_spec_ ## var)

/* Declares a spinlock auto-initialized with the given group and attributes. */
#define LCK_SPIN_DECLARE_ATTR(var, grp, attr) \
	lck_spin_t var; \
	static __startup_data struct lck_spin_startup_spec \
	__startup_lck_spin_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_spin_startup_init, \
	    &__startup_lck_spin_spec_ ## var)

/* Declares a spinlock with default (NULL) attributes. */
#define LCK_SPIN_DECLARE(var, grp) \
	LCK_SPIN_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)

/* Declares a mutex auto-initialized at LOCKS (post-allocator) time. */
#define LCK_MTX_DECLARE_ATTR(var, grp, attr) \
	lck_mtx_t var; \
	static __startup_data struct lck_mtx_startup_spec \
	__startup_lck_mtx_spec_ ## var = { &var, NULL, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FIRST, lck_mtx_startup_init, \
	    &__startup_lck_mtx_spec_ ## var)

/* Declares a mutex with default (NULL) attributes. */
#define LCK_MTX_DECLARE(var, grp) \
	LCK_MTX_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)

/*
 * Declares a mutex (with extended storage) initialized at LOCKS_EARLY
 * time, i.e. before memory allocations are possible.
 */
#define LCK_MTX_EARLY_DECLARE_ATTR(var, grp, attr) \
	lck_mtx_ext_t var ## _ext; \
	lck_mtx_t var; \
	static __startup_data struct lck_mtx_startup_spec \
	__startup_lck_mtx_spec_ ## var = { &var, &var ## _ext, grp, attr }; \
	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_mtx_startup_init, \
	    &__startup_lck_mtx_spec_ ## var)

/* Early-init mutex with default (NULL) attributes. */
#define LCK_MTX_EARLY_DECLARE(var, grp) \
	LCK_MTX_EARLY_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
1054 
1055 #endif /* XNU_KERNEL_PRIVATE */
1056 
1057 __END_DECLS
1058 
1059 #endif /* _KERN_LOCKS_H_ */
1060