xref: /xnu-11417.140.69/osfmk/kern/locks.h (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_LOCKS_H_
30 #define _KERN_LOCKS_H_
31 
32 #include <sys/cdefs.h>
33 #include <sys/appleapiopts.h>
34 
35 #include <mach/boolean.h>
36 #include <machine/locks.h>
37 
38 #include <kern/assert.h>
39 #include <kern/kern_types.h>
40 #include <kern/lock_attr.h>
41 #include <kern/lock_group.h>
42 #include <kern/lock_mtx.h>
43 #include <kern/lock_rw.h>
44 #include <kern/lock_types.h>
45 #ifdef KERNEL_PRIVATE
46 #include <kern/ticket_lock.h>
47 #endif
48 #ifdef  XNU_KERNEL_PRIVATE
49 #include <kern/startup.h>
50 #include <kern/percpu.h>
51 #endif /* XNU_KERNEL_PRIVATE */
52 
53 __BEGIN_DECLS
54 
/* Declares a spin lock variable with the given storage class. */
#define decl_lck_spin_data(class, name)     class lck_spin_t name

/* Allocates and initializes a spin lock; release with lck_spin_free(). */
extern lck_spin_t      *lck_spin_alloc_init(
	lck_grp_t               *grp,
	lck_attr_t              *attr);

/* Initializes a caller-provided spin lock with the given group and attributes. */
extern void             lck_spin_init(
	lck_spin_t              *lck,
	lck_grp_t               *grp,
	lck_attr_t              *attr);

/* Acquires the spin lock. */
extern void             lck_spin_lock(
	lck_spin_t              *lck);

/* Acquires the spin lock, attributing the acquisition to the given group. */
extern void             lck_spin_lock_grp(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

/* Releases a spin lock held by the current thread. */
extern void             lck_spin_unlock(
	lck_spin_t              *lck);

/* Destroys a spin lock initialized with lck_spin_init(). */
extern void             lck_spin_destroy(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

/* Destroys and frees a spin lock obtained from lck_spin_alloc_init(). */
extern void             lck_spin_free(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

/*
 * Drops the spin lock, waits for the event, and returns with the lock
 * state dictated by lck_sleep_action (see lck_sleep_action_t).
 */
extern wait_result_t    lck_spin_sleep(
	lck_spin_t              *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible);

/* Same as lck_spin_sleep(), attributing the wait to the given group. */
extern wait_result_t    lck_spin_sleep_grp(
	lck_spin_t              *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	lck_grp_t               *grp);

/* Same as lck_spin_sleep(), with an absolute deadline for the wait. */
extern wait_result_t    lck_spin_sleep_deadline(
	lck_spin_t              *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	uint64_t                deadline);
103 
104 #ifdef  KERNEL_PRIVATE
105 
/*
 * _nopreempt variants: as the base functions, but they do not adjust the
 * preemption state around the operation — the caller manages preemption
 * itself. NOTE(review): exact preemption contract not visible in this
 * header; confirm against the implementation before relying on it.
 */
extern void             lck_spin_lock_nopreempt(
	lck_spin_t              *lck);

extern void             lck_spin_lock_nopreempt_grp(
	lck_spin_t              *lck, lck_grp_t *grp);

extern void             lck_spin_unlock_nopreempt(
	lck_spin_t              *lck);

/* Attempts to acquire the lock without blocking; TRUE on success. */
extern boolean_t        lck_spin_try_lock_grp(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

/* Attempts to acquire the lock without blocking; TRUE on success. */
extern boolean_t        lck_spin_try_lock(
	lck_spin_t              *lck);

extern boolean_t        lck_spin_try_lock_nopreempt(
	lck_spin_t              *lck);

extern boolean_t        lck_spin_try_lock_nopreempt_grp(
	lck_spin_t              *lck,
	lck_grp_t               *grp);

/* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */
extern boolean_t        kdp_lck_spin_is_acquired(
	lck_spin_t              *lck);
132 
133 /*
134  * Name: lck_spin_sleep_with_inheritor
135  *
136  * Description:
137  *   deschedule the current thread and wait on the waitq associated with event
138  *   to be woken up.
139  *
140  *   While waiting, the sched priority of the waiting thread will contribute to
141  *   the push of the event that will be directed to the inheritor specified.
142  *
143  *   An interruptible mode and deadline can be specified to return earlier from
144  *   the wait.
145  *
146  * Args:
147  *   Arg1: lck_spin_t lock used to protect the sleep.
148  *         The lock will be dropped while sleeping and reaquired before
149  *         returning according to the sleep action specified.
150  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
151  *   Arg3: event to wait on.
152  *   Arg4: thread to propagate the event push to.
153  *   Arg5: interruptible flag for wait.
154  *   Arg6: deadline for wait.
155  *
156  * Conditions:
157  *   Lock must be held.
158  *
159  *   Returns with the lock held according to the sleep action specified.
160  *   Lock will be dropped while waiting.
161  *
162  *   The inheritor specified cannot return to user space or exit until another
163  *   inheritor is specified for the event or a wakeup for the event is called.
164  *
165  * Returns: result of the wait.
166  */
167 extern wait_result_t lck_spin_sleep_with_inheritor(
168 	lck_spin_t              *lock,
169 	lck_sleep_action_t      lck_sleep_action,
170 	event_t                 event,
171 	thread_t                inheritor,
172 	wait_interrupt_t        interruptible,
173 	uint64_t                deadline);
174 
175 #if MACH_KERNEL_PRIVATE
176 
177 /*
178  * Name: hw_lck_ticket_sleep_with_inheritor
179  *
180  * Description:
181  *   deschedule the current thread and wait on the waitq associated with event
182  *   to be woken up.
183  *
184  *   While waiting, the sched priority of the waiting thread will contribute to
185  *   the push of the event that will be directed to the inheritor specified.
186  *
187  *   An interruptible mode and deadline can be specified to return earlier from
188  *   the wait.
189  *
190  * Args:
191  *   Arg1: hw_lck_ticket_t lock used to protect the sleep.
192  *         The lock will be dropped while sleeping and reaquired before
193  *         returning according to the sleep action specified.
194  *   Arg2: lck_grp_t associated with the lock.
195  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
196  *   Arg3: event to wait on.
197  *   Arg5: thread to propagate the event push to.
198  *   Arg6: interruptible flag for wait.
199  *   Arg7: deadline for wait.
200  *
201  * Conditions:
202  *   Lock must be held.
203  *
204  *   Returns with the lock held according to the sleep action specified.
205  *
206  *   Lock will be dropped while waiting.
207  *
208  *   The inheritor specified cannot return to user space or exit until another
209  *   inheritor is specified for the event or a wakeup for the event is called.
210  *
211  * Returns: result of the wait.
212  */
213 extern wait_result_t hw_lck_ticket_sleep_with_inheritor(
214 	hw_lck_ticket_t         *lock,
215 	lck_grp_t               *grp,
216 	lck_sleep_action_t      lck_sleep_action,
217 	event_t                 event,
218 	thread_t                inheritor,
219 	wait_interrupt_t        interruptible,
220 	uint64_t                deadline);
221 
222 #endif
223 
224 /*
225  * Name: lck_ticket_sleep_with_inheritor
226  *
227  * Description:
228  *   deschedule the current thread and wait on the waitq associated with event
229  *   to be woken up.
230  *
231  *   While waiting, the sched priority of the waiting thread will contribute to
232  *   the push of the event that will be directed to the inheritor specified.
233  *
234  *   An interruptible mode and deadline can be specified to return earlier from
235  *   the wait.
236  *
237  * Args:
238  *   Arg1: lck_ticket_t lock used to protect the sleep.
239  *         The lock will be dropped while sleeping and reaquired before
240  *         returning according to the sleep action specified.
241  *   Arg2: lck_grp_t associated with the lock.
242  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
243  *   Arg3: event to wait on.
244  *   Arg5: thread to propagate the event push to.
245  *   Arg6: interruptible flag for wait.
246  *   Arg7: deadline for wait.
247  *
248  * Conditions:
249  *   Lock must be held.
250  *
251  *   Returns with the lock held according to the sleep action specified.
252  *
253  *   Lock will be dropped while waiting.
254  *
255  *   The inheritor specified cannot return to user space or exit until another
256  *   inheritor is specified for the event or a wakeup for the event is called.
257  *
258  * Returns: result of the wait.
259  */
260 extern wait_result_t lck_ticket_sleep_with_inheritor(
261 	lck_ticket_t            *lock,
262 	lck_grp_t               *grp,
263 	lck_sleep_action_t      lck_sleep_action,
264 	event_t                 event,
265 	thread_t                inheritor,
266 	wait_interrupt_t        interruptible,
267 	uint64_t                deadline);
268 
269 /*
270  * Name: lck_mtx_sleep_with_inheritor
271  *
272  * Description:
273  *   deschedule the current thread and wait on the waitq associated with event
274  *   to be woken up.
275  *
276  *   While waiting, the sched priority of the waiting thread will contribute to
277  *   the push of the event that will be directed to the inheritor specified.
278  *
279  *   An interruptible mode and deadline can be specified to return earlier from
280  *   the wait.
281  *
282  * Args:
283  *   Arg1: lck_mtx_t lock used to protect the sleep.
284  *         The lock will be dropped while sleeping and reaquired before
285  *         returning according to the sleep action specified.
286  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
287  *   Arg3: event to wait on.
288  *   Arg4: thread to propagate the event push to.
289  *   Arg5: interruptible flag for wait.
290  *   Arg6: deadline for wait.
291  *
292  * Conditions:
293  *   Lock must be held.
294  *
295  *   Returns with the lock held according to the sleep action specified.
296  *
297  *   Lock will be dropped while waiting.
298  *
299  *   The inheritor specified cannot return to user space or exit until another
300  *   inheritor is specified for the event or a wakeup for the event is called.
301  *
302  * Returns: result of the wait.
303  */
304 extern wait_result_t lck_mtx_sleep_with_inheritor(
305 	lck_mtx_t               *lock,
306 	lck_sleep_action_t      lck_sleep_action,
307 	event_t                 event,
308 	thread_t                inheritor,
309 	wait_interrupt_t        interruptible,
310 	uint64_t                deadline);
311 
312 /*
313  * Name: lck_rw_sleep_with_inheritor
314  *
315  * Description:
316  *   deschedule the current thread and wait on the waitq associated with event
317  *   to be woken up.
318  *
319  *   While waiting, the sched priority of the waiting thread will contribute to
320  *   the push of the event that will be directed to the inheritor specified.
321  *
322  *   An interruptible mode and deadline can be specified to return earlier from
323  *   the wait.
324  *
325  * Args:
326  *   Arg1: lck_rw_t lock used to protect the sleep.
327  *         The lock will be dropped while sleeping and reaquired before
328  *         returning according to the sleep action specified.
329  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE.
330  *   Arg3: event to wait on.
331  *   Arg4: thread to propagate the event push to.
332  *   Arg5: interruptible flag for wait.
333  *   Arg6: deadline for wait.
334  *
335  * Conditions:
336  *   Lock must be held.
337  *
338  *   Returns with the lock held according to the sleep action specified.
339  *
340  *   Lock will be dropped while waiting.
341  *
342  *   The inheritor specified cannot return to user space or exit until another
343  *   inheritor is specified for the event or a wakeup for the event is called.
344  *
345  * Returns: result of the wait.
346  */
347 extern wait_result_t lck_rw_sleep_with_inheritor(
348 	lck_rw_t                *lock,
349 	lck_sleep_action_t      lck_sleep_action,
350 	event_t                 event,
351 	thread_t                inheritor,
352 	wait_interrupt_t        interruptible,
353 	uint64_t                deadline);
354 
355 /*
356  * Name: wakeup_one_with_inheritor
357  *
358  * Description:
359  *   Wake up one waiter for event if any.
360  *
361  *   The thread woken up will be the one with the higher sched priority waiting
362  *   on event.
363  *
364  *   The push for the event will be transferred from the last inheritor to the
365  *   woken up thread.
366  *
367  * Args:
368  *   Arg1: event to wake from.
369  *   Arg2: wait result to pass to the woken up thread.
370  *   Arg3: pointer for storing the thread wokenup.
371  *
372  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
373  *
374  * Conditions:
375  *   The new woken up inheritor cannot return to user space or exit until
376  *   another inheritor is specified for the event or a new wakeup for the event
377  *   is performed.
378  *
379  *   A reference for the woken thread is acquired.
380  *
381  *   NOTE: this cannot be called from interrupt context.
382  */
383 extern kern_return_t wakeup_one_with_inheritor(
384 	event_t                 event,
385 	wait_result_t           result,
386 	lck_wake_action_t       action,
387 	thread_t                *thread_wokenup);
388 
389 extern kern_return_t wakeup_thread_with_inheritor(
390 	event_t                 event,
391 	wait_result_t           result,
392 	lck_wake_action_t       action,
393 	thread_t                thread_towake);
394 
395 /*
396  * Name: wakeup_all_with_inheritor
397  *
398  * Description: wake up all waiters waiting for event. The old inheritor will lose the push.
399  *
400  * Args:
401  *   Arg1: event to wake from.
402  *   Arg2: wait result to pass to the woken up threads.
403  *
404  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
405  *
406  * Conditions: NOTE: this cannot be called from interrupt context.
407  */
408 extern kern_return_t wakeup_all_with_inheritor(
409 	event_t                 event,
410 	wait_result_t           result);
411 
412 /*
413  * Name: change_sleep_inheritor
414  *
415  * Description:
416  *   Redirect the push of the waiting threads of event to the new inheritor specified.
417  *
418  * Args:
419  *   Arg1: event to redirect the push.
420  *   Arg2: new inheritor for event.
421  *
422  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
423  *
424  * Conditions:
425  *   In case of success, the new inheritor cannot return to user space or exit
426  *   until another inheritor is specified for the event or a wakeup for the
427  *   event is called.
428  *
429  *   NOTE: this cannot be called from interrupt context.
430  */
431 extern kern_return_t change_sleep_inheritor(
432 	event_t                 event,
433 	thread_t                inheritor);
434 
435 
436 #if XNU_KERNEL_PRIVATE
437 
438 /*
439  * Bits layout of cond_swi_var32/cond_swi_var64.
440  * First SWI_COND_OWNER_BITS are reserved for the owner
441  * the remaining can be used by the caller
442  */
443 #define SWI_COND_OWNER_BITS     20
444 #define SWI_COND_CALLER_BITS    (32 - SWI_COND_OWNER_BITS)
445 
446 typedef struct cond_swi_var32 {
447 	union {
448 		uint32_t cond32_data;
449 		struct {
450 			uint32_t cond32_owner: SWI_COND_OWNER_BITS,
451 			    cond32_caller_bits: SWI_COND_CALLER_BITS;
452 		};
453 	};
454 } cond_swi_var32_s;
455 
456 typedef struct cond_swi_var64 {
457 	union {
458 		uint64_t cond64_data;
459 		struct {
460 			uint32_t cond64_owner: SWI_COND_OWNER_BITS,
461 			    cond64_caller_bits: SWI_COND_CALLER_BITS;
462 			uint32_t cond64_caller_extra;
463 		};
464 	};
465 } cond_swi_var64_s;
466 
467 typedef struct cond_swi_var *cond_swi_var_t;
468 
469 /*
470  * Name: cond_sleep_with_inheritor32
471  *
472  * Description: Conditionally sleeps with inheritor, with condition variable of 32bits.
473  *              Allows a thread to conditionally sleep while indicating which thread should
474  *              inherit the priority push associated with the condition.
475  *              The condition should be expressed through a cond_swi_var32_s pointer.
476  *              The condition needs to be populated by the caller with the ctid of the
477  *              thread that should inherit the push. The remaining bits of the condition
478  *              can be used by the caller to implement its own synchronization logic.
479  *              A copy of the condition value observed by the caller when it decided to call
480  *              this function should be provided to prevent races with matching wakeups.
481  *              This function will atomically check the value stored in the condition against
482  *              the expected/observed one provided. If the check doesn't pass the thread will not
483  *              sleep and the function will return.
484  *              The ctid provided in the condition will be used only after a successful
485  *              check.
486  *
487  * Args:
488  *   Arg1: cond_swi_var32_s pointer that stores the condition to check.
489  *   Arg2: cond_swi_var32_s observed value to check for conditionally sleep.
490  *   Arg3: interruptible flag for wait.
491  *   Arg4: deadline for wait.
492  *
493  * Conditions:
494  *   The inheritor specified cannot return to user space or exit until another
495  *   inheritor is specified for the cond or a wakeup for the cond is called.
496  *
497  * Returns: result of the wait.
498  */
499 extern wait_result_t cond_sleep_with_inheritor32(
500 	cond_swi_var_t          cond,
501 	cond_swi_var32_s        expected_cond,
502 	wait_interrupt_t        interruptible,
503 	uint64_t                deadline);
504 
505 /*
506  * Name: cond_sleep_with_inheritor64
507  *
508  * Description: Conditionally sleeps with inheritor, with condition variable of 64bits.
509  *              Allows a thread to conditionally sleep while indicating which thread should
510  *              inherit the priority push associated with the condition.
511  *              The condition should be expressed through a cond_swi_var64_s pointer.
512  *              The condition needs to be populated by the caller with the ctid of the
513  *              thread that should inherit the push. The remaining bits of the condition
514  *              can be used by the caller to implement its own synchronization logic.
515  *              A copy of the condition value observed by the caller when it decided to call
516  *              this function should be provided to prevent races with matching wakeups.
517  *              This function will atomically check the value stored in the condition against
518  *              the expected/observed one provided. If the check doesn't pass the thread will not
519  *              sleep and the function will return.
520  *              The ctid provided in the condition will be used only after a successful
521  *              check.
522  *
523  * Args:
524  *   Arg1: cond_swi_var64_s pointer that stores the condition to check.
525  *   Arg2: cond_swi_var64_s observed value to check for conditionally sleep.
526  *   Arg3: interruptible flag for wait.
527  *   Arg4: deadline for wait.
528  *
529  * Conditions:
530  *   The inheritor specified cannot return to user space or exit until another
531  *   inheritor is specified for the cond or a wakeup for the cond is called.
532  *
533  * Returns: result of the wait.
534  */
535 extern wait_result_t cond_sleep_with_inheritor64(
536 	cond_swi_var_t          cond,
537 	cond_swi_var64_s        expected_cond,
538 	wait_interrupt_t        interruptible,
539 	uint64_t                deadline);
540 
541 /*
542  * Name: cond_sleep_with_inheritor64_mask
543  *
544  * Description: Conditionally sleeps with inheritor, with condition variable of 64bits.
545  *              Allows a thread to conditionally sleep while indicating which thread should
546  *              inherit the priority push associated with the condition.
547  *              The condition should be expressed through a cond_swi_var64_s pointer.
548  *              The condition needs to be populated by the caller with the ctid of the
549  *              thread that should inherit the push. The remaining bits of the condition
550  *              can be used by the caller to implement its own synchronization logic.
551  *              A copy of the condition value observed by the caller when it decided to call
552  *              this function should be provided to prevent races with matching wakeups.
553  *              This function will atomically check the value stored in the condition against
554  *              the expected/observed one provided only for the bits that are set in the mask.
555  *              If the check doesn't pass the thread will not sleep and the function will return.
556  *              The ctid provided in the condition will be used only after a successful
557  *              check.
558  *
559  * Args:
560  *   Arg1: cond_swi_var64_s pointer that stores the condition to check.
561  *   Arg2: cond_swi_var64_s observed value to check for conditionally sleep.
562  *   Arg3: mask to apply to the condition to check.
563  *   Arg4: interruptible flag for wait.
564  *   Arg5: deadline for wait.
565  *
566  * Conditions:
567  *   The inheritor specified cannot return to user space or exit until another
568  *   inheritor is specified for the cond or a wakeup for the cond is called.
569  *
570  * Returns: result of the wait.
571  */
572 extern wait_result_t cond_sleep_with_inheritor64_mask(
573 	cond_swi_var_t          cond,
574 	cond_swi_var64_s        expected_cond,
575 	uint64_t                check_mask,
576 	wait_interrupt_t        interruptible,
577 	uint64_t                deadline);
578 
579 /*
580  * Name: cond_wakeup_one_with_inheritor
581  *
582  * Description: Wake up one waiter waiting on the condition (if any).
583  *              The thread woken up will be the one with the higher sched priority waiting on the condition.
584  *              The push for the condition will be transferred from the last inheritor to the woken up thread.
585  *
586  * Args:
587  *   Arg1: condition to wake from.
588  *   Arg2: wait result to pass to the woken up thread.
589  *   Arg3: pointer for storing the thread wokenup.
590  *
591  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
592  *
593  * Conditions:
594  *   The new woken up inheritor cannot return to user space or exit until
595  *   another inheritor is specified for the event or a new wakeup for the event
596  *   is performed.
597  *
598  *   A reference for the woken thread is acquired.
599  *
600  *   NOTE: this cannot be called from interrupt context.
601  */
602 extern kern_return_t cond_wakeup_one_with_inheritor(
603 	cond_swi_var_t          cond,
604 	wait_result_t           result,
605 	lck_wake_action_t       action,
606 	thread_t                *thread_wokenup);
607 
608 /*
609  * Name: cond_wakeup_all_with_inheritor
610  *
611  * Description: Wake up all waiters waiting on the same condition. The old inheritor will lose the push.
612  *
613  * Args:
614  *   Arg1: condition to wake from.
615  *   Arg2: wait result to pass to the woken up threads.
616  *
617  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
618  *
619  * Conditions: NOTE: this cannot be called from interrupt context.
620  */
621 extern kern_return_t cond_wakeup_all_with_inheritor(
622 	cond_swi_var_t          cond,
623 	wait_result_t           result);
624 
625 /*
626  * gate structure
627  */
628 typedef struct gate {
629 	uintptr_t         gt_data;                // thread holder, interlock bit and waiter bit
630 	struct turnstile *gt_turnstile;           // turnstile, protected by the interlock bit
631 	union {
632 		struct {
633 			uint32_t  gt_refs:16,             // refs using the gate, protected by interlock bit
634 			    gt_alloc:1,                   // gate was allocated with gate_alloc_init
635 			    gt_type:2,                    // type bits for validity
636 			    gt_flags_pad:13;              // unused
637 		};
638 		uint32_t  gt_flags;
639 	};
640 } gate_t;
641 
642 #else /* XNU_KERNEL_PRIVATE */
643 
644 typedef struct gate {
645 	uintptr_t         opaque1;
646 	uintptr_t         opaque2;
647 	uint32_t          opaque3;
648 } gate_t;
649 
650 #endif /* XNU_KERNEL_PRIVATE */
651 
652 /*
653  * Possible gate_wait_result_t values.
654  */
655 __options_decl(gate_wait_result_t, unsigned int, {
656 	GATE_HANDOFF      = 0x00,         /* gate was handedoff to current thread */
657 	GATE_OPENED       = 0x01,         /* gate was opened */
658 	GATE_TIMED_OUT    = 0x02,         /* wait timedout */
659 	GATE_INTERRUPTED  = 0x03,         /* wait was interrupted */
660 });
661 
662 /*
663  * Gate flags used by gate_assert
664  */
665 __options_decl(gate_assert_flags_t, unsigned int, {
666 	GATE_ASSERT_CLOSED = 0x00,         /* asserts the gate is currently closed */
667 	GATE_ASSERT_OPEN   = 0x01,         /* asserts the gate is currently open */
668 	GATE_ASSERT_HELD   = 0x02,         /* asserts the gate is closed and held by current_thread() */
669 });
670 
671 /*
672  * Gate flags used by gate_handoff
673  */
674 __options_decl(gate_handoff_flags_t, unsigned int, {
675 	GATE_HANDOFF_DEFAULT            = 0x00,         /* a waiter must exist to handoff the gate */
676 	GATE_HANDOFF_OPEN_IF_NO_WAITERS = 0x1,         /* behave like a gate_open() if there are no waiters */
677 });
678 
679 /*
680  * Name: decl_lck_rw_gate_data
681  *
682  * Description: declares a gate variable with specified storage class.
683  *              The gate itself will be stored in this variable and it is the caller's responsibility
684  *              to ensure that this variable's memory is going to be accessible by all threads that will use
685  *              the gate.
686  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
687  *              be used in every thread.
688  *
689  *              The variable needs to be initialized once with lck_rw_gate_init() and destroyed once with
690  *              lck_rw_gate_destroy() when not needed anymore.
691  *
692  *              The gate will be used in conjunction with a lck_rw_t.
693  *
694  * Args:
695  *   Arg1: storage class.
696  *   Arg2: variable name.
697  */
698 #define decl_lck_rw_gate_data(class, name)                              class gate_t name
699 
700 /*
701  * Name: lck_rw_gate_init
702  *
703  * Description: initializes a variable declared with decl_lck_rw_gate_data.
704  *
705  * Args:
706  *   Arg1: lck_rw_t lock used to protect the gate.
707  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
708  */
709 extern void lck_rw_gate_init(lck_rw_t *lock, gate_t *gate);
710 
711 /*
712  * Name: lck_rw_gate_destroy
713  *
714  * Description: destroys a variable previously initialized
715  *              with lck_rw_gate_init().
716  *
717  * Args:
718  *   Arg1: lck_rw_t lock used to protect the gate.
719  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
720  */
721 extern void lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate);
722 
723 /*
724  * Name: lck_rw_gate_alloc_init
725  *
726  * Description: allocates and initializes a gate_t.
727  *
728  * Args:
729  *   Arg1: lck_rw_t lock used to protect the gate.
730  *
731  * Returns:
732  *         gate_t allocated.
733  */
734 extern gate_t* lck_rw_gate_alloc_init(lck_rw_t *lock);
735 
736 /*
737  * Name: lck_rw_gate_free
738  *
739  * Description: destroys and tries to free a gate previously allocated
740  *              with lck_rw_gate_alloc_init().
741  *              The gate free might be delegated to the last thread returning
742  *              from the gate_wait().
743  *
744  * Args:
745  *   Arg1: lck_rw_t lock used to protect the gate.
746  *   Arg2: pointer to the gate obtained with lck_rw_gate_alloc_init().
747  */
748 extern void lck_rw_gate_free(lck_rw_t *lock, gate_t *gate);
749 
750 /*
751  * Name: lck_rw_gate_try_close
752  *
753  * Description: Tries to close the gate.
754  *              In case of success the current thread will be set as
755  *              the holder of the gate.
756  *
757  * Args:
758  *   Arg1: lck_rw_t lock used to protect the gate.
759  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
760  *
761  * Conditions: Lock must be held. Returns with the lock held.
762  *
763  * Returns:
764  *   KERN_SUCCESS in case the gate was successfully closed. The current thread
765  *   is the new holder of the gate.
766  *
767  *   A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called
768  *   later on to wake up possible waiters on the gate before returning to
769  *   userspace.
770  *
771  *   If the intent is to conditionally probe the gate before waiting, the lock
772  *   must not be dropped between the calls to lck_rw_gate_try_close() and
773  *   lck_rw_gate_wait().
774  *
775  *   KERN_FAILURE in case the gate was already closed.
776  *   Will panic if the current thread was already the holder of the gate.
777  *
778  *   lck_rw_gate_wait() should be called instead if the intent is to
779  *   unconditionally wait on this gate.
780  *
781  *   The calls to lck_rw_gate_try_close() and lck_rw_gate_wait() should
782  *   be done without dropping the lock that is protecting the gate in between.
783  */
784 extern kern_return_t lck_rw_gate_try_close(lck_rw_t *lock, gate_t *gate);
785 
786 /*
787  * Name: lck_rw_gate_close
788  *
789  * Description: Closes the gate. The current thread will be set as
790  *              the holder of the gate. Will panic if the gate is already closed.
791  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
792  *              to wake up possible waiters on the gate before returning to userspace.
793  *
794  * Args:
795  *   Arg1: lck_rw_t lock used to protect the gate.
796  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
797  *
798  * Conditions: Lock must be held. Returns with the lock held.
799  *             The gate must be open.
800  *
801  */
802 extern void lck_rw_gate_close(lck_rw_t *lock, gate_t *gate);
803 
804 
805 /*
806  * Name: lck_rw_gate_open
807  *
808  * Description: Opens the gate and wakes up possible waiters.
809  *
810  * Args:
811  *   Arg1: lck_rw_t lock used to protect the gate.
812  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
813  *
814  * Conditions: Lock must be held. Returns with the lock held.
815  *             The current thread must be the holder of the gate.
816  *
817  */
818 extern void lck_rw_gate_open(lck_rw_t *lock, gate_t *gate);
819 
820 /*
821  * Name: lck_rw_gate_handoff
822  *
823  * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
824  *              priority will be selected as the new holder of the gate, and woken up,
825  *              with the gate remaining in the closed state throughout.
826  *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
827  *              will be returned.
828  *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
829  *              case no waiters were found.
830  *
831  *
832  * Args:
833  *   Arg1: lck_rw_t lock used to protect the gate.
834  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
835  *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
836  *
837  * Conditions: Lock must be held. Returns with the lock held.
838  *             The current thread must be the holder of the gate.
839  *
840  * Returns:
841  *          KERN_SUCCESS in case one of the waiters became the new holder.
842  *          KERN_NOT_WAITING in case there were no waiters.
843  *
844  */
845 extern kern_return_t lck_rw_gate_handoff(lck_rw_t *lock, gate_t *gate, gate_handoff_flags_t flags);
846 
847 /*
848  * Name: lck_rw_gate_steal
849  *
 * Description: Steals the ownership of the gate. It sets the current thread as the
 *              new holder of the gate.
852  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
853  *              to wake up possible waiters on the gate before returning to userspace.
854  *              NOTE: the previous holder should not call lck_rw_gate_open() or lck_rw_gate_handoff()
855  *              anymore.
856  *
857  *
858  * Args:
859  *   Arg1: lck_rw_t lock used to protect the gate.
860  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
861  *
862  * Conditions: Lock must be held. Returns with the lock held.
863  *             The gate must be closed and the current thread must not already be the holder.
864  *
865  */
866 extern void lck_rw_gate_steal(lck_rw_t *lock, gate_t *gate);
867 
868 /*
869  * Name: lck_rw_gate_wait
870  *
871  * Description: Waits for the current thread to become the holder of the gate or for the
872  *              gate to become open. An interruptible mode and deadline can be specified
873  *              to return earlier from the wait.
874  *
875  * Args:
876  *   Arg1: lck_rw_t lock used to protect the gate.
877  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, LCK_SLEEP_UNLOCK.
 *   Arg4: interruptible flag for wait.
 *   Arg5: deadline
881  *
882  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
883  *             Lock will be dropped while waiting.
884  *             The gate must be closed.
885  *
886  * Returns: Reason why the thread was woken up.
887  *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
 *                         A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
 *                         to wake up possible waiters on the gate before returning to userspace.
890  *          GATE_OPENED - the gate was opened by the holder.
891  *          GATE_TIMED_OUT - the thread was woken up by a timeout.
892  *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
893  */
894 extern gate_wait_result_t lck_rw_gate_wait(
895 	lck_rw_t               *lock,
896 	gate_t                 *gate,
897 	lck_sleep_action_t      lck_sleep_action,
898 	wait_interrupt_t        interruptible,
899 	uint64_t                deadline);
900 
901 /*
902  * Name: lck_rw_gate_assert
903  *
904  * Description: asserts that the gate is in the specified state.
905  *
906  * Args:
907  *   Arg1: lck_rw_t lock used to protect the gate.
908  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
 *   Arg3: flags specifying the assert type.
910  *         GATE_ASSERT_CLOSED - the gate is currently closed
911  *         GATE_ASSERT_OPEN - the gate is currently opened
912  *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
913  */
914 extern void lck_rw_gate_assert(lck_rw_t *lock, gate_t *gate, gate_assert_flags_t flags);
915 
916 /*
917  * Name: decl_lck_mtx_gate_data
918  *
919  * Description: declares a gate variable with specified storage class.
920  *              The gate itself will be stored in this variable and it is the caller's responsibility
921  *              to ensure that this variable's memory is going to be accessible by all threads that will use
922  *              the gate.
923  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
924  *              be used in every thread.
925  *
926  *              The variable needs to be initialized once with lck_mtx_gate_init() and destroyed once with
927  *              lck_mtx_gate_destroy() when not needed anymore.
928  *
929  *              The gate will be used in conjunction with a lck_mtx_t.
930  *
931  * Args:
932  *   Arg1: storage class.
933  *   Arg2: variable name.
934  */
935 #define decl_lck_mtx_gate_data(class, name)                             class gate_t name
936 
937 /*
938  * Name: lck_mtx_gate_init
939  *
940  * Description: initializes a variable declared with decl_lck_mtx_gate_data.
941  *
942  * Args:
943  *   Arg1: lck_mtx_t lock used to protect the gate.
944  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
945  */
946 extern void lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate);
947 
948 /*
949  * Name: lck_mtx_gate_destroy
950  *
951  * Description: destroys a variable previously initialized
952  *              with lck_mtx_gate_init().
953  *
954  * Args:
955  *   Arg1: lck_mtx_t lock used to protect the gate.
956  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
957  */
958 extern void lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate);
959 
960 /*
961  * Name: lck_mtx_gate_alloc_init
962  *
963  * Description: allocates and initializes a gate_t.
964  *
965  * Args:
966  *   Arg1: lck_mtx_t lock used to protect the gate.
967  *
968  * Returns:
969  *         gate_t allocated.
970  */
971 extern gate_t* lck_mtx_gate_alloc_init(lck_mtx_t *lock);
972 
973 /*
974  * Name: lck_mtx_gate_free
975  *
976  * Description: destroys and tries to free a gate previously allocated
977  *	        with lck_mtx_gate_alloc_init().
978  *              The gate free might be delegated to the last thread returning
979  *              from the gate_wait().
980  *
981  * Args:
982  *   Arg1: lck_mtx_t lock used to protect the gate.
983  *   Arg2: pointer to the gate obtained with lck_mtx_gate_alloc_init().
984  */
985 extern void lck_mtx_gate_free(lck_mtx_t *lock, gate_t *gate);
986 
987 /*
988  * Name: lck_mtx_gate_try_close
989  *
990  * Description: Tries to close the gate.
991  *              In case of success the current thread will be set as
992  *              the holder of the gate.
993  *
994  * Args:
995  *   Arg1: lck_mtx_t lock used to protect the gate.
996  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
997  *
998  * Conditions: Lock must be held. Returns with the lock held.
999  *
1000  * Returns:
1001  *   KERN_SUCCESS in case the gate was successfully closed. The current thread
1002  *   is the new holder of the gate.
1003  *
1004  *   A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called
1005  *   later on to wake up possible waiters on the gate before returning to
1006  *   userspace.
1007  *
1008  *   If the intent is to conditionally probe the gate before waiting, the lock
1009  *   must not be dropped between the calls to lck_mtx_gate_try_close() and
1010  *   lck_mtx_gate_wait().
1011  *
1012  *   KERN_FAILURE in case the gate was already closed. Will panic if the current
1013  *   thread was already the holder of the gate.
1014  *
1015  *   lck_mtx_gate_wait() should be called instead if the intent is to
1016  *   unconditionally wait on this gate.
1017  *
1018  *   The calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait() should
1019  *   be done without dropping the lock that is protecting the gate in between.
1020  */
1021 extern kern_return_t lck_mtx_gate_try_close(lck_mtx_t *lock, gate_t *gate);
1022 
1023 /*
1024  * Name: lck_mtx_gate_close
1025  *
1026  * Description: Closes the gate. The current thread will be set as
1027  *              the holder of the gate. Will panic if the gate is already closed.
1028  *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1029  *              to wake up possible waiters on the gate before returning to userspace.
1030  *
1031  * Args:
1032  *   Arg1: lck_mtx_t lock used to protect the gate.
1033  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1034  *
1035  * Conditions: Lock must be held. Returns with the lock held.
1036  *             The gate must be open.
1037  *
1038  */
1039 extern void lck_mtx_gate_close(lck_mtx_t *lock, gate_t *gate);
1040 
1041 /*
1042  * Name: lck_mtx_gate_open
1043  *
 * Description: Opens the gate and wakes up possible waiters.
1045  *
1046  * Args:
1047  *   Arg1: lck_mtx_t lock used to protect the gate.
1048  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1049  *
1050  * Conditions: Lock must be held. Returns with the lock held.
1051  *             The current thread must be the holder of the gate.
1052  *
1053  */
1054 extern void lck_mtx_gate_open(lck_mtx_t *lock, gate_t *gate);
1055 
1056 /*
1057  * Name: lck_mtx_gate_handoff
1058  *
1059  * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
1060  *              priority will be selected as the new holder of the gate, and woken up,
1061  *              with the gate remaining in the closed state throughout.
1062  *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
1063  *              will be returned.
1064  *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
1065  *              case no waiters were found.
1066  *
1067  *
1068  * Args:
1069  *   Arg1: lck_mtx_t lock used to protect the gate.
1070  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1071  *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
1072  *
1073  * Conditions: Lock must be held. Returns with the lock held.
1074  *             The current thread must be the holder of the gate.
1075  *
1076  * Returns:
1077  *          KERN_SUCCESS in case one of the waiters became the new holder.
1078  *          KERN_NOT_WAITING in case there were no waiters.
1079  *
1080  */
1081 extern kern_return_t lck_mtx_gate_handoff(lck_mtx_t *lock, gate_t *gate, gate_handoff_flags_t flags);
1082 
1083 /*
1084  * Name: lck_mtx_gate_steal
1085  *
1086  * Description: Steals the ownership of the gate. It sets the current thread as the
1087  *              new holder of the gate.
1088  *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1089  *              to wake up possible waiters on the gate before returning to userspace.
1090  *              NOTE: the previous holder should not call lck_mtx_gate_open() or lck_mtx_gate_handoff()
1091  *              anymore.
1092  *
1093  *
1094  * Args:
1095  *   Arg1: lck_mtx_t lock used to protect the gate.
1096  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1097  *
1098  * Conditions: Lock must be held. Returns with the lock held.
1099  *             The gate must be closed and the current thread must not already be the holder.
1100  *
1101  */
1102 extern void lck_mtx_gate_steal(lck_mtx_t *lock, gate_t *gate);
1103 
1104 /*
1105  * Name: lck_mtx_gate_wait
1106  *
1107  * Description: Waits for the current thread to become the holder of the gate or for the
1108  *              gate to become open. An interruptible mode and deadline can be specified
1109  *              to return earlier from the wait.
1110  *
1111  * Args:
1112  *   Arg1: lck_mtx_t lock used to protect the gate.
1113  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
 *   Arg4: interruptible flag for wait.
 *   Arg5: deadline
1117  *
1118  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
1119  *             Lock will be dropped while waiting.
1120  *             The gate must be closed.
1121  *
1122  * Returns: Reason why the thread was woken up.
1123  *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
1124  *                         A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1125  *                         to wake up possible waiters on the gate before returning to userspace.
1126  *          GATE_OPENED - the gate was opened by the holder.
1127  *          GATE_TIMED_OUT - the thread was woken up by a timeout.
1128  *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
1129  */
1130 extern gate_wait_result_t lck_mtx_gate_wait(
1131 	lck_mtx_t              *lock,
1132 	gate_t                 *gate,
1133 	lck_sleep_action_t      lck_sleep_action,
1134 	wait_interrupt_t        interruptible,
1135 	uint64_t                deadline);
1136 
1137 /*
1138  * Name: lck_mtx_gate_assert
1139  *
1140  * Description: asserts that the gate is in the specified state.
1141  *
1142  * Args:
1143  *   Arg1: lck_mtx_t lock used to protect the gate.
1144  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
 *   Arg3: flags specifying the assert type.
1146  *         GATE_ASSERT_CLOSED - the gate is currently closed
1147  *         GATE_ASSERT_OPEN - the gate is currently opened
1148  *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
1149  */
1150 extern void lck_mtx_gate_assert(lck_mtx_t *lock, gate_t *gate, gate_assert_flags_t flags);
1151 
1152 extern void             lck_spin_assert(
1153 	const lck_spin_t              *lck,
1154 	unsigned                int    type);
1155 
1156 #if CONFIG_PV_TICKET
1157 __startup_func extern void lck_init_pv(void);
1158 #endif
1159 
1160 #endif  /* KERNEL_PRIVATE */
1161 
/*
 * LCK_SPIN_ASSERT() checks a spinlock's ownership state (see
 * LCK_ASSERT_OWNED / LCK_ASSERT_NOTOWNED below) on MACH_ASSERT
 * kernels only; it expands to nothing on other builds.
 */
#if MACH_ASSERT
#define LCK_SPIN_ASSERT(lck, type) MACH_ASSERT_DO(lck_spin_assert(lck, type))
#else /* !MACH_ASSERT */
#define LCK_SPIN_ASSERT(lck, type)
#endif /* !MACH_ASSERT */
1167 
/*
 * LCK_SPIN_ASSERT_DEBUG() is the DEBUG-kernel-only variant of the
 * spinlock ownership assertion; it expands to nothing on non-DEBUG
 * builds.
 */
#if DEBUG
#define LCK_SPIN_ASSERT_DEBUG(lck, type) lck_spin_assert((lck),(type))
#else /* DEBUG */
#define LCK_SPIN_ASSERT_DEBUG(lck, type)
#endif /* DEBUG */
1173 
1174 #define LCK_ASSERT_OWNED                1
1175 #define LCK_ASSERT_NOTOWNED             2
1176 
1177 #ifdef  MACH_KERNEL_PRIVATE
1178 
/*
 * Diagnostic record filled in when a spinlock acquisition times out
 * (see lck_spinlock_timeout_hit() below). One instance exists per CPU
 * (see the PERCPU_DECL below).
 */
typedef struct lck_spinlock_to_info {
	void                   *lock;              /* the lock that timed out */
#if DEBUG || DEVELOPMENT
	uintptr_t               owner_thread_orig; /* owner recorded when the wait began (set via
	                                            * lck_spinlock_timeout_set_orig_owner/_ctid);
	                                            * DEBUG/DEVELOPMENT kernels only */
#endif /* DEBUG || DEVELOPMENT */
	uintptr_t               owner_thread_cur;  /* owner observed when the timeout fired */
	int                     owner_cpu;         /* CPU of the owner thread -- NOTE(review):
	                                            * inferred from the name; confirm in locks.c */
	uint32_t                extra;             /* lock-type-specific extra state -- NOTE(review):
	                                            * exact meaning not visible in this header */
} *lck_spinlock_to_info_t;
1188 
1189 extern volatile lck_spinlock_to_info_t lck_spinlock_timeout_in_progress;
1190 PERCPU_DECL(struct lck_spinlock_to_info, lck_spinlock_to_info);
1191 
/*
 * Per-CPU state for paravirtualized ticket locks (see the
 * CONFIG_PV_TICKET block and lck_init_pv() above, and the
 * PERCPU_DECL below).
 */
typedef struct lck_tktlock_pv_info {
	void                   *ltpi_lck;       /* ticket lock being waited on -- NOTE(review):
	                                         * inferred from the name; confirm in ticket_lock.c */
	uint8_t                 ltpi_wt;        /* NOTE(review): semantics not visible in this
	                                         * header; verify against the implementation */
} *lck_tktlock_pv_info_t;
1196 
1197 PERCPU_DECL(struct lck_tktlock_pv_info, lck_tktlock_pv_info);
1198 
1199 extern void             lck_spinlock_timeout_set_orig_owner(
1200 	uintptr_t               owner);
1201 
1202 extern void             lck_spinlock_timeout_set_orig_ctid(
1203 	uint32_t                ctid);
1204 
1205 extern lck_spinlock_to_info_t lck_spinlock_timeout_hit(
1206 	void                   *lck,
1207 	uintptr_t               owner);
1208 
1209 #endif /* MACH_KERNEL_PRIVATE */
1210 #if  XNU_KERNEL_PRIVATE
1211 
1212 uintptr_t unslide_for_kdebug(const void* object) __pure2;
1213 
/*
 * Specification record consumed by lck_attr_startup_init() to set up a
 * lock attribute during early boot (see LCK_ATTR_DECLARE below, which
 * registers one of these with STARTUP_ARG).
 */
struct lck_attr_startup_spec {
	lck_attr_t              *lck_attr;              /* attribute variable to initialize */
	uint32_t                lck_attr_set_flags;     /* attribute flags to set */
	uint32_t                lck_attr_clear_flags;   /* attribute flags to clear */
};
1219 
/*
 * Specification record consumed by lck_spin_startup_init() to
 * initialize a statically declared spinlock during early boot
 * (see LCK_SPIN_DECLARE_ATTR below).
 */
struct lck_spin_startup_spec {
	lck_spin_t              *lck;       /* spinlock variable to initialize */
	lck_grp_t               *lck_grp;   /* group the lock is accounted under */
	lck_attr_t              *lck_attr;  /* attributes to initialize the lock with */
};
1225 
/*
 * Specification record consumed by lck_ticket_startup_init() to
 * initialize a statically declared ticket lock during early boot
 * (see LCK_TICKET_DECLARE below).
 */
struct lck_ticket_startup_spec {
	lck_ticket_t            *lck;       /* ticket lock variable to initialize */
	lck_grp_t               *lck_grp;   /* group the lock is accounted under */
};
1230 
1231 extern void             lck_attr_startup_init(
1232 	struct lck_attr_startup_spec *spec);
1233 
1234 extern void             lck_spin_startup_init(
1235 	struct lck_spin_startup_spec *spec);
1236 
1237 extern void             lck_ticket_startup_init(
1238 	struct lck_ticket_startup_spec *spec);
1239 
1240 /*
1241  * Auto-initializing locks declarations
1242  * ------------------------------------
1243  *
1244  * Unless you need to configure your locks in very specific ways,
1245  * there is no point creating explicit lock attributes. For most
1246  * static locks, these declaration macros can be used:
1247  *
1248  * - LCK_SPIN_DECLARE for spinlocks,
1249  * - LCK_MTX_DECLARE for mutexes,
1250  *
1251  * For cases when some particular attributes need to be used,
1252  * these come in *_ATTR variants that take a variable declared with
1253  * LCK_ATTR_DECLARE as an argument.
1254  */
/*
 * Declares a read-only-late lck_attr_t named `var` and registers a
 * startup spec so that lck_attr_startup_init() initializes it (setting
 * `set_flags` and clearing `clear_flags`) during the LOCKS startup
 * phase at rank SECOND -- i.e. before the lock declarations below,
 * which initialize at rank FOURTH.
 */
#define LCK_ATTR_DECLARE(var, set_flags, clear_flags) \
	SECURITY_READ_ONLY_LATE(lck_attr_t) var; \
	static __startup_data struct lck_attr_startup_spec \
	__startup_lck_attr_spec_ ## var = { &var, set_flags, clear_flags }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_SECOND, lck_attr_startup_init, \
	    &__startup_lck_attr_spec_ ## var)
1261 
/*
 * Declares a spinlock named `var` and registers a startup spec so that
 * lck_spin_startup_init() initializes it with group `grp` and
 * attributes `attr` during the LOCKS startup phase at rank FOURTH
 * (after attributes declared with LCK_ATTR_DECLARE are initialized).
 */
#define LCK_SPIN_DECLARE_ATTR(var, grp, attr) \
	lck_spin_t var; \
	static __startup_data struct lck_spin_startup_spec \
	__startup_lck_spin_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_spin_startup_init, \
	    &__startup_lck_spin_spec_ ## var)
1268 
/* Convenience form of LCK_SPIN_DECLARE_ATTR() with default (NULL) attributes. */
#define LCK_SPIN_DECLARE(var, grp) \
	LCK_SPIN_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
1271 
/*
 * Declares a ticket lock named `var` and registers a startup spec so
 * that lck_ticket_startup_init() initializes it with group `grp`
 * during the LOCKS startup phase at rank FOURTH.
 */
#define LCK_TICKET_DECLARE(var, grp) \
	lck_ticket_t var; \
	static __startup_data struct lck_ticket_startup_spec \
	__startup_lck_ticket_spec_ ## var = { &var, grp }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_ticket_startup_init, \
	    &__startup_lck_ticket_spec_ ## var)
1278 
1279 #endif /* XNU_KERNEL_PRIVATE */
1280 
1281 __END_DECLS
1282 
1283 #endif /* _KERN_LOCKS_H_ */
1284