xref: /xnu-10002.1.13/osfmk/kern/locks.h (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_LOCKS_H_
30 #define _KERN_LOCKS_H_
31 
32 #include <sys/cdefs.h>
33 #include <sys/appleapiopts.h>
34 
35 #include <mach/boolean.h>
36 #include <machine/locks.h>
37 
38 #include <kern/kern_types.h>
39 #include <kern/lock_attr.h>
40 #include <kern/lock_group.h>
41 #include <kern/lock_mtx.h>
42 #include <kern/lock_rw.h>
43 #include <kern/lock_types.h>
44 #ifdef KERNEL_PRIVATE
45 #include <kern/ticket_lock.h>
46 #endif
47 #ifdef  XNU_KERNEL_PRIVATE
48 #include <kern/startup.h>
49 #include <kern/percpu.h>
50 #endif /* XNU_KERNEL_PRIVATE */
51 
52 __BEGIN_DECLS
53 
/*
 * Name: decl_lck_spin_data
 *
 * Description: declares a lck_spin_t spin lock variable with the
 *              specified storage class.
 *
 * Args:
 *   Arg1: storage class.
 *   Arg2: variable name.
 */
54 #define decl_lck_spin_data(class, name)     class lck_spin_t name
55 
56 extern lck_spin_t      *lck_spin_alloc_init(
57 	lck_grp_t               *grp,
58 	lck_attr_t              *attr);
59 
60 extern void             lck_spin_init(
61 	lck_spin_t              *lck,
62 	lck_grp_t               *grp,
63 	lck_attr_t              *attr);
64 
65 extern void             lck_spin_lock(
66 	lck_spin_t              *lck);
67 
68 extern void             lck_spin_lock_grp(
69 	lck_spin_t              *lck,
70 	lck_grp_t               *grp);
71 
72 extern void             lck_spin_unlock(
73 	lck_spin_t              *lck);
74 
75 extern void             lck_spin_destroy(
76 	lck_spin_t              *lck,
77 	lck_grp_t               *grp);
78 
79 extern void             lck_spin_free(
80 	lck_spin_t              *lck,
81 	lck_grp_t               *grp);
82 
83 extern wait_result_t    lck_spin_sleep(
84 	lck_spin_t              *lck,
85 	lck_sleep_action_t      lck_sleep_action,
86 	event_t                 event,
87 	wait_interrupt_t        interruptible);
88 
89 extern wait_result_t    lck_spin_sleep_grp(
90 	lck_spin_t              *lck,
91 	lck_sleep_action_t      lck_sleep_action,
92 	event_t                 event,
93 	wait_interrupt_t        interruptible,
94 	lck_grp_t               *grp);
95 
96 extern wait_result_t    lck_spin_sleep_deadline(
97 	lck_spin_t              *lck,
98 	lck_sleep_action_t      lck_sleep_action,
99 	event_t                 event,
100 	wait_interrupt_t        interruptible,
101 	uint64_t                deadline);
102 
103 #ifdef  KERNEL_PRIVATE
104 
105 extern void             lck_spin_lock_nopreempt(
106 	lck_spin_t              *lck);
107 
108 extern void             lck_spin_lock_nopreempt_grp(
109 	lck_spin_t              *lck, lck_grp_t *grp);
110 
111 extern void             lck_spin_unlock_nopreempt(
112 	lck_spin_t              *lck);
113 
114 extern boolean_t        lck_spin_try_lock_grp(
115 	lck_spin_t              *lck,
116 	lck_grp_t               *grp);
117 
118 extern boolean_t        lck_spin_try_lock(
119 	lck_spin_t              *lck);
120 
121 extern boolean_t        lck_spin_try_lock_nopreempt(
122 	lck_spin_t              *lck);
123 
124 extern boolean_t        lck_spin_try_lock_nopreempt_grp(
125 	lck_spin_t              *lck,
126 	lck_grp_t               *grp);
127 
128 /* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */
129 extern boolean_t        kdp_lck_spin_is_acquired(
130 	lck_spin_t              *lck);
131 
132 /*
133  * Name: lck_spin_sleep_with_inheritor
134  *
135  * Description:
136  *   deschedule the current thread and wait on the waitq associated with event
137  *   to be woken up.
138  *
139  *   While waiting, the sched priority of the waiting thread will contribute to
140  *   the push of the event that will be directed to the inheritor specified.
141  *
142  *   An interruptible mode and deadline can be specified to return earlier from
143  *   the wait.
144  *
145  * Args:
146  *   Arg1: lck_spin_t lock used to protect the sleep.
147  *         The lock will be dropped while sleeping and reacquired before
148  *         returning according to the sleep action specified.
149  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
150  *   Arg3: event to wait on.
151  *   Arg4: thread to propagate the event push to.
152  *   Arg5: interruptible flag for wait.
153  *   Arg6: deadline for wait.
154  *
155  * Conditions:
156  *   Lock must be held.
157  *
158  *   Returns with the lock held according to the sleep action specified.
159  *   Lock will be dropped while waiting.
160  *
161  *   The inheritor specified cannot return to user space or exit until another
162  *   inheritor is specified for the event or a wakeup for the event is called.
163  *
164  * Returns: result of the wait.
165  */
166 extern wait_result_t lck_spin_sleep_with_inheritor(
167 	lck_spin_t              *lock,
168 	lck_sleep_action_t      lck_sleep_action,
169 	event_t                 event,
170 	thread_t                inheritor,
171 	wait_interrupt_t        interruptible,
172 	uint64_t                deadline);
173 
174 #if MACH_KERNEL_PRIVATE
175 
176 /*
177  * Name: hw_lck_ticket_sleep_with_inheritor
178  *
179  * Description:
180  *   deschedule the current thread and wait on the waitq associated with event
181  *   to be woken up.
182  *
183  *   While waiting, the sched priority of the waiting thread will contribute to
184  *   the push of the event that will be directed to the inheritor specified.
185  *
186  *   An interruptible mode and deadline can be specified to return earlier from
187  *   the wait.
188  *
189  * Args:
190  *   Arg1: hw_lck_ticket_t lock used to protect the sleep.
191  *         The lock will be dropped while sleeping and reacquired before
192  *         returning according to the sleep action specified.
193  *   Arg2: lck_grp_t associated with the lock.
194  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
195  *   Arg4: event to wait on.
196  *   Arg5: thread to propagate the event push to.
197  *   Arg6: interruptible flag for wait.
198  *   Arg7: deadline for wait.
199  *
200  * Conditions:
201  *   Lock must be held.
202  *
203  *   Returns with the lock held according to the sleep action specified.
204  *
205  *   Lock will be dropped while waiting.
206  *
207  *   The inheritor specified cannot return to user space or exit until another
208  *   inheritor is specified for the event or a wakeup for the event is called.
209  *
210  * Returns: result of the wait.
211  */
212 extern wait_result_t hw_lck_ticket_sleep_with_inheritor(
213 	hw_lck_ticket_t         *lock,
214 	lck_grp_t               *grp,
215 	lck_sleep_action_t      lck_sleep_action,
216 	event_t                 event,
217 	thread_t                inheritor,
218 	wait_interrupt_t        interruptible,
219 	uint64_t                deadline);
220 
221 #endif
222 
223 /*
224  * Name: lck_ticket_sleep_with_inheritor
225  *
226  * Description:
227  *   deschedule the current thread and wait on the waitq associated with event
228  *   to be woken up.
229  *
230  *   While waiting, the sched priority of the waiting thread will contribute to
231  *   the push of the event that will be directed to the inheritor specified.
232  *
233  *   An interruptible mode and deadline can be specified to return earlier from
234  *   the wait.
235  *
236  * Args:
237  *   Arg1: lck_ticket_t lock used to protect the sleep.
238  *         The lock will be dropped while sleeping and reacquired before
239  *         returning according to the sleep action specified.
240  *   Arg2: lck_grp_t associated with the lock.
241  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
242  *   Arg4: event to wait on.
243  *   Arg5: thread to propagate the event push to.
244  *   Arg6: interruptible flag for wait.
245  *   Arg7: deadline for wait.
246  *
247  * Conditions:
248  *   Lock must be held.
249  *
250  *   Returns with the lock held according to the sleep action specified.
251  *
252  *   Lock will be dropped while waiting.
253  *
254  *   The inheritor specified cannot return to user space or exit until another
255  *   inheritor is specified for the event or a wakeup for the event is called.
256  *
257  * Returns: result of the wait.
258  */
259 extern wait_result_t lck_ticket_sleep_with_inheritor(
260 	lck_ticket_t            *lock,
261 	lck_grp_t               *grp,
262 	lck_sleep_action_t      lck_sleep_action,
263 	event_t                 event,
264 	thread_t                inheritor,
265 	wait_interrupt_t        interruptible,
266 	uint64_t                deadline);
267 
268 /*
269  * Name: lck_mtx_sleep_with_inheritor
270  *
271  * Description:
272  *   deschedule the current thread and wait on the waitq associated with event
273  *   to be woken up.
274  *
275  *   While waiting, the sched priority of the waiting thread will contribute to
276  *   the push of the event that will be directed to the inheritor specified.
277  *
278  *   An interruptible mode and deadline can be specified to return earlier from
279  *   the wait.
280  *
281  * Args:
282  *   Arg1: lck_mtx_t lock used to protect the sleep.
283  *         The lock will be dropped while sleeping and reacquired before
284  *         returning according to the sleep action specified.
285  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
286  *   Arg3: event to wait on.
287  *   Arg4: thread to propagate the event push to.
288  *   Arg5: interruptible flag for wait.
289  *   Arg6: deadline for wait.
290  *
291  * Conditions:
292  *   Lock must be held.
293  *
294  *   Returns with the lock held according to the sleep action specified.
295  *
296  *   Lock will be dropped while waiting.
297  *
298  *   The inheritor specified cannot return to user space or exit until another
299  *   inheritor is specified for the event or a wakeup for the event is called.
300  *
301  * Returns: result of the wait.
302  */
303 extern wait_result_t lck_mtx_sleep_with_inheritor(
304 	lck_mtx_t               *lock,
305 	lck_sleep_action_t      lck_sleep_action,
306 	event_t                 event,
307 	thread_t                inheritor,
308 	wait_interrupt_t        interruptible,
309 	uint64_t                deadline);
310 
311 /*
312  * Name: lck_rw_sleep_with_inheritor
313  *
314  * Description:
315  *   deschedule the current thread and wait on the waitq associated with event
316  *   to be woken up.
317  *
318  *   While waiting, the sched priority of the waiting thread will contribute to
319  *   the push of the event that will be directed to the inheritor specified.
320  *
321  *   An interruptible mode and deadline can be specified to return earlier from
322  *   the wait.
323  *
324  * Args:
325  *   Arg1: lck_rw_t lock used to protect the sleep.
326  *         The lock will be dropped while sleeping and reacquired before
327  *         returning according to the sleep action specified.
328  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE.
329  *   Arg3: event to wait on.
330  *   Arg4: thread to propagate the event push to.
331  *   Arg5: interruptible flag for wait.
332  *   Arg6: deadline for wait.
333  *
334  * Conditions:
335  *   Lock must be held.
336  *
337  *   Returns with the lock held according to the sleep action specified.
338  *
339  *   Lock will be dropped while waiting.
340  *
341  *   The inheritor specified cannot return to user space or exit until another
342  *   inheritor is specified for the event or a wakeup for the event is called.
343  *
344  * Returns: result of the wait.
345  */
346 extern wait_result_t lck_rw_sleep_with_inheritor(
347 	lck_rw_t                *lock,
348 	lck_sleep_action_t      lck_sleep_action,
349 	event_t                 event,
350 	thread_t                inheritor,
351 	wait_interrupt_t        interruptible,
352 	uint64_t                deadline);
353 
354 /*
355  * Name: wakeup_one_with_inheritor
356  *
357  * Description:
358  *   Wake up one waiter for event if any.
359  *
360  *   The thread woken up will be the one with the highest sched priority waiting
361  *   on event.
362  *
363  *   The push for the event will be transferred from the last inheritor to the
364  *   woken up thread.
365  *
366  * Args:
367  *   Arg1: event to wake from.
368  *   Arg2: wait result to pass to the woken up thread.
369  *   Arg3: wake action (lck_wake_action_t).
370  *   Arg4: pointer for storing the thread woken up.
370  *
371  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
372  *
373  * Conditions:
374  *   The new woken up inheritor cannot return to user space or exit until
375  *   another inheritor is specified for the event or a new wakeup for the event
376  *   is performed.
377  *
378  *   A reference for the woken thread is acquired.
379  *
380  *   NOTE: this cannot be called from interrupt context.
381  */
382 extern kern_return_t wakeup_one_with_inheritor(
383 	event_t                 event,
384 	wait_result_t           result,
385 	lck_wake_action_t       action,
386 	thread_t                *thread_wokenup);
387 
388 /*
389  * Name: wakeup_all_with_inheritor
390  *
391  * Description: wake up all waiters waiting for event. The old inheritor will lose the push.
392  *
393  * Args:
394  *   Arg1: event to wake from.
395  *   Arg2: wait result to pass to the woken up threads.
396  *
397  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
398  *
399  * Conditions: NOTE: this cannot be called from interrupt context.
400  */
401 extern kern_return_t wakeup_all_with_inheritor(
402 	event_t                 event,
403 	wait_result_t           result);
404 
405 /*
406  * Name: change_sleep_inheritor
407  *
408  * Description:
409  *   Redirect the push of the waiting threads of event to the new inheritor specified.
410  *
411  * Args:
412  *   Arg1: event to redirect the push.
413  *   Arg2: new inheritor for event.
414  *
415  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
416  *
417  * Conditions:
418  *   In case of success, the new inheritor cannot return to user space or exit
419  *   until another inheritor is specified for the event or a wakeup for the
420  *   event is called.
421  *
422  *   NOTE: this cannot be called from interrupt context.
423  */
424 extern kern_return_t change_sleep_inheritor(
425 	event_t                 event,
426 	thread_t                inheritor);
427 
428 
429 #if XNU_KERNEL_PRIVATE
430 
431 /*
432  * Bits layout of cond_swi_var32/cond_swi_var64.
433  * First SWI_COND_OWNER_BITS are reserved for the owner
434  * the remaining can be used by the caller
435  */
436 #define SWI_COND_OWNER_BITS     20
437 #define SWI_COND_CALLER_BITS    (32 - SWI_COND_OWNER_BITS)
438 
/* 32-bit condition word for cond_sleep_with_inheritor32(). */
439 typedef struct cond_swi_var32 {
440 	union {
441 		uint32_t cond32_data;                        /* whole 32-bit condition value */
442 		struct {
443 			uint32_t cond32_owner: SWI_COND_OWNER_BITS,   /* ctid of the thread that inherits the push */
444 			    cond32_caller_bits: SWI_COND_CALLER_BITS; /* free for caller-defined synchronization logic */
445 		};
446 	};
447 } cond_swi_var32_s;
448 
/* 64-bit condition word for cond_sleep_with_inheritor64(). */
449 typedef struct cond_swi_var64 {
450 	union {
451 		uint64_t cond64_data;                        /* whole 64-bit condition value */
452 		struct {
453 			uint32_t cond64_owner: SWI_COND_OWNER_BITS,   /* ctid of the thread that inherits the push */
454 			    cond64_caller_bits: SWI_COND_CALLER_BITS; /* free for caller-defined synchronization logic */
455 			uint32_t cond64_caller_extra;                 /* additional caller bits (upper 32 bits) */
456 		};
457 	};
458 } cond_swi_var64_s;
459 
460 typedef struct cond_swi_var *cond_swi_var_t;
461 
462 /*
463  * Name: cond_sleep_with_inheritor32
464  *
465  * Description: Conditionally sleeps with inheritor, with condition variable of 32bits.
466  *              Allows a thread to conditionally sleep while indicating which thread should
467  *              inherit the priority push associated with the condition.
468  *              The condition should be expressed through a cond_swi_var32_s pointer.
469  *              The condition needs to be populated by the caller with the ctid of the
470  *              thread that should inherit the push. The remaining bits of the condition
471  *              can be used by the caller to implement its own synchronization logic.
472  *              A copy of the condition value observed by the caller when it decided to call
473  *              this function should be provided to prevent races with matching wakeups.
474  *              This function will atomically check the value stored in the condition against
475  *              the expected/observed one provided. If the check doesn't pass the thread will not
476  *              sleep and the function will return.
477  *              The ctid provided in the condition will be used only after a successful
478  *              check.
479  *
480  * Args:
481  *   Arg1: cond_swi_var32_s pointer that stores the condition to check.
482  *   Arg2: cond_swi_var32_s observed value to check for conditionally sleep.
483  *   Arg3: interruptible flag for wait.
484  *   Arg4: deadline for wait.
485  *
486  * Conditions:
487  *   The inheritor specified cannot return to user space or exit until another
488  *   inheritor is specified for the cond or a wakeup for the cond is called.
489  *
490  * Returns: result of the wait.
491  */
492 extern wait_result_t cond_sleep_with_inheritor32(
493 	cond_swi_var_t          cond,
494 	cond_swi_var32_s        expected_cond,
495 	wait_interrupt_t        interruptible,
496 	uint64_t                deadline);
497 
498 /*
499  * Name: cond_sleep_with_inheritor64
500  *
501  * Description: Conditionally sleeps with inheritor, with condition variable of 64bits.
502  *              Allows a thread to conditionally sleep while indicating which thread should
503  *              inherit the priority push associated with the condition.
504  *              The condition should be expressed through a cond_swi_var64_s pointer.
505  *              The condition needs to be populated by the caller with the ctid of the
506  *              thread that should inherit the push. The remaining bits of the condition
507  *              can be used by the caller to implement its own synchronization logic.
508  *              A copy of the condition value observed by the caller when it decided to call
509  *              this function should be provided to prevent races with matching wakeups.
510  *              This function will atomically check the value stored in the condition against
511  *              the expected/observed one provided. If the check doesn't pass the thread will not
512  *              sleep and the function will return.
513  *              The ctid provided in the condition will be used only after a successful
514  *              check.
515  *
516  * Args:
517  *   Arg1: cond_swi_var64_s pointer that stores the condition to check.
518  *   Arg2: cond_swi_var64_s observed value to check for conditionally sleep.
519  *   Arg3: interruptible flag for wait.
520  *   Arg4: deadline for wait.
521  *
522  * Conditions:
523  *   The inheritor specified cannot return to user space or exit until another
524  *   inheritor is specified for the cond or a wakeup for the cond is called.
525  *
526  * Returns: result of the wait.
527  */
528 extern wait_result_t cond_sleep_with_inheritor64(
529 	cond_swi_var_t          cond,
530 	cond_swi_var64_s        expected_cond,
531 	wait_interrupt_t        interruptible,
532 	uint64_t                deadline);
533 
534 /*
535  * Name: cond_sleep_with_inheritor64_mask
536  *
537  * Description: Conditionally sleeps with inheritor, with condition variable of 64bits.
538  *              Allows a thread to conditionally sleep while indicating which thread should
539  *              inherit the priority push associated with the condition.
540  *              The condition should be expressed through a cond_swi_var64_s pointer.
541  *              The condition needs to be populated by the caller with the ctid of the
542  *              thread that should inherit the push. The remaining bits of the condition
543  *              can be used by the caller to implement its own synchronization logic.
544  *              A copy of the condition value observed by the caller when it decided to call
545  *              this function should be provided to prevent races with matching wakeups.
546  *              This function will atomically check the value stored in the condition against
547  *              the expected/observed one provided only for the bits that are set in the mask.
548  *              If the check doesn't pass the thread will not sleep and the function will return.
549  *              The ctid provided in the condition will be used only after a successful
550  *              check.
551  *
552  * Args:
553  *   Arg1: cond_swi_var64_s pointer that stores the condition to check.
554  *   Arg2: cond_swi_var64_s observed value to check for conditionally sleep.
555  *   Arg3: mask to apply to the condition to check.
556  *   Arg4: interruptible flag for wait.
557  *   Arg5: deadline for wait.
558  *
559  * Conditions:
560  *   The inheritor specified cannot return to user space or exit until another
561  *   inheritor is specified for the cond or a wakeup for the cond is called.
562  *
563  * Returns: result of the wait.
564  */
565 extern wait_result_t cond_sleep_with_inheritor64_mask(
566 	cond_swi_var_t          cond,
567 	cond_swi_var64_s        expected_cond,
568 	uint64_t                check_mask,
569 	wait_interrupt_t        interruptible,
570 	uint64_t                deadline);
571 
572 /*
573  * Name: cond_wakeup_one_with_inheritor
574  *
575  * Description: Wake up one waiter waiting on the condition (if any).
576  *              The thread woken up will be the one with the highest sched priority waiting on the condition.
577  *              The push for the condition will be transferred from the last inheritor to the woken up thread.
578  *
579  * Args:
580  *   Arg1: condition to wake from.
581  *   Arg2: wait result to pass to the woken up thread.
582  *   Arg3: wake action (lck_wake_action_t).
583  *   Arg4: pointer for storing the thread woken up.
583  *
584  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
585  *
586  * Conditions:
587  *   The new woken up inheritor cannot return to user space or exit until
588  *   another inheritor is specified for the event or a new wakeup for the event
589  *   is performed.
590  *
591  *   A reference for the woken thread is acquired.
592  *
593  *   NOTE: this cannot be called from interrupt context.
594  */
595 extern kern_return_t cond_wakeup_one_with_inheritor(
596 	cond_swi_var_t          cond,
597 	wait_result_t           result,
598 	lck_wake_action_t       action,
599 	thread_t                *thread_wokenup);
600 
601 /*
602  * Name: cond_wakeup_all_with_inheritor
603  *
604  * Description: Wake up all waiters waiting on the same condition. The old inheritor will lose the push.
605  *
606  * Args:
607  *   Arg1: condition to wake from.
608  *   Arg2: wait result to pass to the woken up threads.
609  *
610  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
611  *
612  * Conditions: NOTE: this cannot be called from interrupt context.
613  */
614 extern kern_return_t cond_wakeup_all_with_inheritor(
615 	cond_swi_var_t          cond,
616 	wait_result_t           result);
617 
618 /*
619  * gate structure
620  *
621  * Single-holder gate used with the lck_rw_gate_* functions declared
622  * below; initialize with lck_rw_gate_init() or lck_rw_gate_alloc_init()
623  * and tear down with lck_rw_gate_destroy()/lck_rw_gate_free().
624  */
621 typedef struct gate {
622 	uintptr_t         gt_data;                // thread holder, interlock bit and waiter bit
623 	struct turnstile *gt_turnstile;           // turnstile, protected by the interlock bit
624 	union {
625 		struct {
626 			uint32_t  gt_refs:16,             // refs using the gate, protected by interlock bit
627 			    gt_alloc:1,                   // gate was allocated with gate_alloc_init
628 			    gt_type:2,                    // type bits for validity
629 			    gt_flags_pad:13;              // unused
630 		};
631 		uint32_t  gt_flags;
632 	};
633 } gate_t;
634 
635 #else /* XNU_KERNEL_PRIVATE */
636 
/*
 * Opaque gate representation for clients outside XNU_KERNEL_PRIVATE.
 * The fields appear to mirror the storage of the private struct gate
 * (gt_data, gt_turnstile, flags word) without exposing their meaning —
 * keep the two layouts in sync.
 */
637 typedef struct gate {
638 	uintptr_t         opaque1;
639 	uintptr_t         opaque2;
640 	uint32_t          opaque3;
641 } gate_t;
642 
643 #endif /* XNU_KERNEL_PRIVATE */
644 
645 /*
646  * Possible gate_wait_result_t values.
647  */
648 __options_decl(gate_wait_result_t, unsigned int, {
649 	GATE_HANDOFF      = 0x00,         /* gate was handed off to current thread */
650 	GATE_OPENED       = 0x01,         /* gate was opened */
651 	GATE_TIMED_OUT    = 0x02,         /* wait timed out */
652 	GATE_INTERRUPTED  = 0x03,         /* wait was interrupted */
653 });
654 
655 /*
656  * Gate flags used by gate_assert
657  */
658 __options_decl(gate_assert_flags_t, unsigned int, {
659 	GATE_ASSERT_CLOSED = 0x00,         /* asserts the gate is currently closed */
660 	GATE_ASSERT_OPEN   = 0x01,         /* asserts the gate is currently open */
661 	GATE_ASSERT_HELD   = 0x02,         /* asserts the gate is closed and held by current_thread() */
662 });
663 
664 /*
665  * Gate flags used by gate_handoff
666  */
667 __options_decl(gate_handoff_flags_t, unsigned int, {
668 	GATE_HANDOFF_DEFAULT            = 0x00,         /* a waiter must exist to handoff the gate */
669 	GATE_HANDOFF_OPEN_IF_NO_WAITERS = 0x1,         /* behave like a gate_open() if there are no waiters */
670 });
671 
672 /*
673  * Name: decl_lck_rw_gate_data
674  *
675  * Description: declares a gate variable with specified storage class.
676  *              The gate itself will be stored in this variable and it is the caller's responsibility
677  *              to ensure that this variable's memory is going to be accessible by all threads that will use
678  *              the gate.
679  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
680  *              be used in every thread.
681  *
682  *              The variable needs to be initialized once with lck_rw_gate_init() and destroyed once with
683  *              lck_rw_gate_destroy() when not needed anymore.
684  *
685  *              The gate will be used in conjunction with a lck_rw_t.
686  *
687  * Args:
688  *   Arg1: storage class.
689  *   Arg2: variable name.
690  */
691 #define decl_lck_rw_gate_data(class, name)                              class gate_t name
692 
693 /*
694  * Name: lck_rw_gate_init
695  *
696  * Description: initializes a variable declared with decl_lck_rw_gate_data.
697  *
698  * Args:
699  *   Arg1: lck_rw_t lock used to protect the gate.
700  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
701  */
702 extern void lck_rw_gate_init(lck_rw_t *lock, gate_t *gate);
703 
704 /*
705  * Name: lck_rw_gate_destroy
706  *
707  * Description: destroys a variable previously initialized
708  *              with lck_rw_gate_init().
709  *
710  * Args:
711  *   Arg1: lck_rw_t lock used to protect the gate.
712  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
713  */
714 extern void lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate);
715 
716 /*
717  * Name: lck_rw_gate_alloc_init
718  *
719  * Description: allocates and initializes a gate_t.
720  *
721  * Args:
722  *   Arg1: lck_rw_t lock used to protect the gate.
723  *
724  * Returns:
725  *         gate_t allocated.
726  */
727 extern gate_t* lck_rw_gate_alloc_init(lck_rw_t *lock);
728 
729 /*
730  * Name: lck_rw_gate_free
731  *
732  * Description: destroys and tries to free a gate previously allocated
733  *              with lck_rw_gate_alloc_init().
734  *              The gate free might be delegated to the last thread returning
735  *              from the gate_wait().
736  *
737  * Args:
738  *   Arg1: lck_rw_t lock used to protect the gate.
739  *   Arg2: pointer to the gate obtained with lck_rw_gate_alloc_init().
740  */
741 extern void lck_rw_gate_free(lck_rw_t *lock, gate_t *gate);
742 
743 /*
744  * Name: lck_rw_gate_try_close
745  *
746  * Description: Tries to close the gate.
747  *              In case of success the current thread will be set as
748  *              the holder of the gate.
749  *
750  * Args:
751  *   Arg1: lck_rw_t lock used to protect the gate.
752  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
753  *
754  * Conditions: Lock must be held. Returns with the lock held.
755  *
756  * Returns:
757  *   KERN_SUCCESS in case the gate was successfully closed. The current thread
758  *   is the new holder of the gate.
759  *
760  *   A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called
761  *   later on to wake up possible waiters on the gate before returning to
762  *   userspace.
763  *
764  *   If the intent is to conditionally probe the gate before waiting, the lock
765  *   must not be dropped between the calls to lck_rw_gate_try_close() and
766  *   lck_rw_gate_wait().
767  *
768  *   KERN_FAILURE in case the gate was already closed.
769  *   Will panic if the current thread was already the holder of the gate.
770  *
771  *   lck_rw_gate_wait() should be called instead if the intent is to
772  *   unconditionally wait on this gate.
773  *
774  *   The calls to lck_rw_gate_try_close() and lck_rw_gate_wait() should
775  *   be done without dropping the lock that is protecting the gate in between.
776  */
777 extern kern_return_t lck_rw_gate_try_close(lck_rw_t *lock, gate_t *gate);
778 
779 /*
780  * Name: lck_rw_gate_close
781  *
782  * Description: Closes the gate. The current thread will be set as
783  *              the holder of the gate. Will panic if the gate is already closed.
784  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
785  *              to wake up possible waiters on the gate before returning to userspace.
786  *
787  * Args:
788  *   Arg1: lck_rw_t lock used to protect the gate.
789  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
790  *
791  * Conditions: Lock must be held. Returns with the lock held.
792  *             The gate must be open.
793  *
794  */
795 extern void lck_rw_gate_close(lck_rw_t *lock, gate_t *gate);
796 
797 
798 /*
799  * Name: lck_rw_gate_open
800  *
801  * Description: Opens the gate and wakes up possible waiters.
802  *
803  * Args:
804  *   Arg1: lck_rw_t lock used to protect the gate.
805  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
806  *
807  * Conditions: Lock must be held. Returns with the lock held.
808  *             The current thread must be the holder of the gate.
809  *
810  */
811 extern void lck_rw_gate_open(lck_rw_t *lock, gate_t *gate);
812 
813 /*
814  * Name: lck_rw_gate_handoff
815  *
816  * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
817  *              priority will be selected as the new holder of the gate, and woken up,
818  *              with the gate remaining in the closed state throughout.
819  *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
820  *              will be returned.
821  *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
822  *              case no waiters were found.
823  *
824  *
825  * Args:
826  *   Arg1: lck_rw_t lock used to protect the gate.
827  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
828  *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
829  *
830  * Conditions: Lock must be held. Returns with the lock held.
831  *             The current thread must be the holder of the gate.
832  *
833  * Returns:
834  *          KERN_SUCCESS in case one of the waiters became the new holder.
835  *          KERN_NOT_WAITING in case there were no waiters.
836  *
837  */
838 extern kern_return_t lck_rw_gate_handoff(lck_rw_t *lock, gate_t *gate, gate_handoff_flags_t flags);
839 
840 /*
841  * Name: lck_rw_gate_steal
842  *
843  * Description: Steals the ownership of the gate. It sets the current thread as the
844  *              new holder of the gate.
845  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
846  *              to wake up possible waiters on the gate before returning to userspace.
847  *              NOTE: the previous holder should not call lck_rw_gate_open() or lck_rw_gate_handoff()
848  *              anymore.
849  *
850  *
851  * Args:
852  *   Arg1: lck_rw_t lock used to protect the gate.
853  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
854  *
855  * Conditions: Lock must be held. Returns with the lock held.
856  *             The gate must be closed and the current thread must not already be the holder.
857  *
858  */
859 extern void lck_rw_gate_steal(lck_rw_t *lock, gate_t *gate);
860 
861 /*
862  * Name: lck_rw_gate_wait
863  *
864  * Description: Waits for the current thread to become the holder of the gate or for the
865  *              gate to become open. An interruptible mode and deadline can be specified
866  *              to return earlier from the wait.
867  *
868  * Args:
869  *   Arg1: lck_rw_t lock used to protect the gate.
870  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
871  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, LCK_SLEEP_UNLOCK.
872  *   Arg4: interruptible flag for wait.
873  *   Arg5: deadline
874  *
875  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
876  *             Lock will be dropped while waiting.
877  *             The gate must be closed.
878  *
879  * Returns: Reason why the thread was woken up.
880  *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
881  *                         A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
882  *                         to wake up possible waiters on the gate before returning to userspace.
883  *          GATE_OPENED - the gate was opened by the holder.
884  *          GATE_TIMED_OUT - the thread was woken up by a timeout.
885  *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
886  */
887 extern gate_wait_result_t lck_rw_gate_wait(
888 	lck_rw_t               *lock,
889 	gate_t                 *gate,
890 	lck_sleep_action_t      lck_sleep_action,
891 	wait_interrupt_t        interruptible,
892 	uint64_t                deadline);
893 
894 /*
895  * Name: lck_rw_gate_assert
896  *
897  * Description: asserts that the gate is in the specified state.
898  *
899  * Args:
900  *   Arg1: lck_rw_t lock used to protect the gate.
901  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
902  *   Arg3: flags to specify the assert type.
903  *         GATE_ASSERT_CLOSED - the gate is currently closed
904  *         GATE_ASSERT_OPEN - the gate is currently opened
905  *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
906  */
907 extern void lck_rw_gate_assert(lck_rw_t *lock, gate_t *gate, gate_assert_flags_t flags);
908 
909 /*
910  * Name: decl_lck_mtx_gate_data
911  *
912  * Description: declares a gate variable with specified storage class.
913  *              The gate itself will be stored in this variable and it is the caller's responsibility
914  *              to ensure that this variable's memory is going to be accessible by all threads that will use
915  *              the gate.
916  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
917  *              be used in every thread.
918  *
919  *              The variable needs to be initialized once with lck_mtx_gate_init() and destroyed once with
920  *              lck_mtx_gate_destroy() when not needed anymore.
921  *
922  *              The gate will be used in conjunction with a lck_mtx_t.
923  *
924  * Args:
925  *   Arg1: storage class.
926  *   Arg2: variable name.
927  */
928 #define decl_lck_mtx_gate_data(class, name)                             class gate_t name /* expands to a gate_t variable with the given storage class; see block comment above */
929 
930 /*
931  * Name: lck_mtx_gate_init
932  *
933  * Description: initializes a variable declared with decl_lck_mtx_gate_data.
934  *
935  * Args:
936  *   Arg1: lck_mtx_t lock used to protect the gate.
937  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
938  */
939 extern void lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate);
940 
941 /*
942  * Name: lck_mtx_gate_destroy
943  *
944  * Description: destroys a variable previously initialized
945  *              with lck_mtx_gate_init().
946  *
947  * Args:
948  *   Arg1: lck_mtx_t lock used to protect the gate.
949  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
950  */
951 extern void lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate);
952 
953 /*
954  * Name: lck_mtx_gate_alloc_init
955  *
956  * Description: allocates and initializes a gate_t.
957  *
958  * Args:
959  *   Arg1: lck_mtx_t lock used to protect the gate.
960  *
961  * Returns:
962  *         gate_t allocated.
963  */
964 extern gate_t* lck_mtx_gate_alloc_init(lck_mtx_t *lock);
965 
966 /*
967  * Name: lck_mtx_gate_free
968  *
969  * Description: destroys and tries to free a gate previously allocated
970  *	        with lck_mtx_gate_alloc_init().
971  *              The gate free might be delegated to the last thread returning
972  *              from the gate_wait().
973  *
974  * Args:
975  *   Arg1: lck_mtx_t lock used to protect the gate.
976  *   Arg2: pointer to the gate obtained with lck_mtx_gate_alloc_init().
977  */
978 extern void lck_mtx_gate_free(lck_mtx_t *lock, gate_t *gate);
979 
980 /*
981  * Name: lck_mtx_gate_try_close
982  *
983  * Description: Tries to close the gate.
984  *              In case of success the current thread will be set as
985  *              the holder of the gate.
986  *
987  * Args:
988  *   Arg1: lck_mtx_t lock used to protect the gate.
989  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
990  *
991  * Conditions: Lock must be held. Returns with the lock held.
992  *
993  * Returns:
994  *   KERN_SUCCESS in case the gate was successfully closed. The current thread
995  *   is the new holder of the gate.
996  *
997  *   A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called
998  *   later on to wake up possible waiters on the gate before returning to
999  *   userspace.
1000  *
1001  *   If the intent is to conditionally probe the gate before waiting, the lock
1002  *   must not be dropped between the calls to lck_mtx_gate_try_close() and
1003  *   lck_mtx_gate_wait().
1004  *
1005  *   KERN_FAILURE in case the gate was already closed. Will panic if the current
1006  *   thread was already the holder of the gate.
1007  *
1008  *   lck_mtx_gate_wait() should be called instead if the intent is to
1009  *   unconditionally wait on this gate.
1010  *
1011  *   The calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait() should
1012  *   be done without dropping the lock that is protecting the gate in between.
1013  */
1014 extern kern_return_t lck_mtx_gate_try_close(lck_mtx_t *lock, gate_t *gate);
1015 
1016 /*
1017  * Name: lck_mtx_gate_close
1018  *
1019  * Description: Closes the gate. The current thread will be set as
1020  *              the holder of the gate. Will panic if the gate is already closed.
1021  *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1022  *              to wake up possible waiters on the gate before returning to userspace.
1023  *
1024  * Args:
1025  *   Arg1: lck_mtx_t lock used to protect the gate.
1026  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1027  *
1028  * Conditions: Lock must be held. Returns with the lock held.
1029  *             The gate must be open.
1030  *
1031  */
1032 extern void lck_mtx_gate_close(lck_mtx_t *lock, gate_t *gate);
1033 
1034 /*
1035  * Name: lck_mtx_gate_open
1036  *
1037  * Description: Opens the gate and wakes up possible waiters.
1038  *
1039  * Args:
1040  *   Arg1: lck_mtx_t lock used to protect the gate.
1041  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1042  *
1043  * Conditions: Lock must be held. Returns with the lock held.
1044  *             The current thread must be the holder of the gate.
1045  *
1046  */
1047 extern void lck_mtx_gate_open(lck_mtx_t *lock, gate_t *gate);
1048 
1049 /*
1050  * Name: lck_mtx_gate_handoff
1051  *
1052  * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
1053  *              priority will be selected as the new holder of the gate, and woken up,
1054  *              with the gate remaining in the closed state throughout.
1055  *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
1056  *              will be returned.
1057  *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
1058  *              case no waiters were found.
1059  *
1060  *
1061  * Args:
1062  *   Arg1: lck_mtx_t lock used to protect the gate.
1063  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1064  *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
1065  *
1066  * Conditions: Lock must be held. Returns with the lock held.
1067  *             The current thread must be the holder of the gate.
1068  *
1069  * Returns:
1070  *          KERN_SUCCESS in case one of the waiters became the new holder.
1071  *          KERN_NOT_WAITING in case there were no waiters.
1072  *
1073  */
1074 extern kern_return_t lck_mtx_gate_handoff(lck_mtx_t *lock, gate_t *gate, gate_handoff_flags_t flags);
1075 
1076 /*
1077  * Name: lck_mtx_gate_steal
1078  *
1079  * Description: Steals the ownership of the gate. It sets the current thread as the
1080  *              new holder of the gate.
1081  *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1082  *              to wake up possible waiters on the gate before returning to userspace.
1083  *              NOTE: the previous holder should not call lck_mtx_gate_open() or lck_mtx_gate_handoff()
1084  *              anymore.
1085  *
1086  *
1087  * Args:
1088  *   Arg1: lck_mtx_t lock used to protect the gate.
1089  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1090  *
1091  * Conditions: Lock must be held. Returns with the lock held.
1092  *             The gate must be closed and the current thread must not already be the holder.
1093  *
1094  */
1095 extern void lck_mtx_gate_steal(lck_mtx_t *lock, gate_t *gate);
1096 
1097 /*
1098  * Name: lck_mtx_gate_wait
1099  *
1100  * Description: Waits for the current thread to become the holder of the gate or for the
1101  *              gate to become open. An interruptible mode and deadline can be specified
1102  *              to return earlier from the wait.
1103  *
1104  * Args:
1105  *   Arg1: lck_mtx_t lock used to protect the gate.
1106  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1107  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
1108  *   Arg4: interruptible flag for wait.
1109  *   Arg5: deadline
1110  *
1111  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
1112  *             Lock will be dropped while waiting.
1113  *             The gate must be closed.
1114  *
1115  * Returns: Reason why the thread was woken up.
1116  *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
1117  *                         A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1118  *                         to wake up possible waiters on the gate before returning to userspace.
1119  *          GATE_OPENED - the gate was opened by the holder.
1120  *          GATE_TIMED_OUT - the thread was woken up by a timeout.
1121  *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
1122  */
1123 extern gate_wait_result_t lck_mtx_gate_wait(
1124 	lck_mtx_t              *lock,
1125 	gate_t                 *gate,
1126 	lck_sleep_action_t      lck_sleep_action,
1127 	wait_interrupt_t        interruptible,
1128 	uint64_t                deadline);
1129 
1130 /*
1131  * Name: lck_mtx_gate_assert
1132  *
1133  * Description: asserts that the gate is in the specified state.
1134  *
1135  * Args:
1136  *   Arg1: lck_mtx_t lock used to protect the gate.
1137  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1138  *   Arg3: flags to specify the assert type.
1139  *         GATE_ASSERT_CLOSED - the gate is currently closed
1140  *         GATE_ASSERT_OPEN - the gate is currently opened
1141  *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
1142  */
1143 extern void lck_mtx_gate_assert(lck_mtx_t *lock, gate_t *gate, gate_assert_flags_t flags);
1144 
1145 extern void             lck_spin_assert(
1146 	const lck_spin_t              *lck,
1147 	unsigned                int    type);
1148 
1149 #if CONFIG_PV_TICKET
1150 __startup_func extern void lck_init_pv(void);
1151 #endif
1152 
1153 #endif  /* KERNEL_PRIVATE */
1154 
1155 #if MACH_ASSERT
/* Assert spinlock ownership state (type is LCK_ASSERT_OWNED or LCK_ASSERT_NOTOWNED);
 * compiles to a real lck_spin_assert() only on MACH_ASSERT kernels, no-op otherwise. */
1156 #define LCK_SPIN_ASSERT(lck, type) lck_spin_assert((lck),(type))
1157 #else /* MACH_ASSERT */
1158 #define LCK_SPIN_ASSERT(lck, type)
1159 #endif /* MACH_ASSERT */
1160 
1161 #if DEBUG
/* Same as LCK_SPIN_ASSERT, but enabled only on DEBUG kernels (for assertions too
 * expensive or noisy for MACH_ASSERT builds); no-op otherwise. */
1162 #define LCK_SPIN_ASSERT_DEBUG(lck, type) lck_spin_assert((lck),(type))
1163 #else /* DEBUG */
1164 #define LCK_SPIN_ASSERT_DEBUG(lck, type)
1165 #endif /* DEBUG */
1166 
1167 #define LCK_ASSERT_OWNED                1 /* type arg for lck_spin_assert(): lock must be held by the caller */
1168 #define LCK_ASSERT_NOTOWNED             2 /* type arg for lck_spin_assert(): lock must not be held by the caller */
1169 
1170 #ifdef  MACH_KERNEL_PRIVATE
1171 
/* Per-CPU diagnostic record populated when a spinlock timeout ("to") fires;
 * see lck_spinlock_timeout_hit() and the lck_spinlock_to_info PERCPU_DECL below. */
1172 typedef struct lck_spinlock_to_info {
1173 	void                   *lock;              /* lock on which the timeout was hit */
1174 #if DEBUG || DEVELOPMENT
1175 	uintptr_t               owner_thread_orig; /* owner recorded when the wait began; set via lck_spinlock_timeout_set_orig_owner()/_ctid() */
1176 #endif /* DEBUG || DEVELOPMENT */
1177 	uintptr_t               owner_thread_cur;  /* owner observed at the time the timeout fired */
1178 	int                     owner_cpu;         /* NOTE(review): presumably the CPU the owner runs on -- confirm in locks implementation */
1179 	uint32_t                extra;             /* NOTE(review): lock-type specific extra diagnostic state -- confirm meaning per lock type */
1180 } *lck_spinlock_to_info_t;
1181 
1182 extern volatile lck_spinlock_to_info_t lck_spinlock_timeout_in_progress;
1183 PERCPU_DECL(struct lck_spinlock_to_info, lck_spinlock_to_info);
1184 
/* Per-CPU state used by the paravirtualized ticket lock support
 * (see CONFIG_PV_TICKET / lck_init_pv() above). */
1185 typedef struct lck_tktlock_pv_info {
1186 	void                   *ltpi_lck;  /* NOTE(review): presumably the ticket lock this CPU is waiting on -- confirm in ticket_lock.c */
1187 	uint8_t                 ltpi_wt;   /* NOTE(review): likely the ticket value being waited for -- confirm in ticket_lock.c */
1188 } *lck_tktlock_pv_info_t;
1189 
1190 PERCPU_DECL(struct lck_tktlock_pv_info, lck_tktlock_pv_info);
1191 
1192 extern void             lck_spinlock_timeout_set_orig_owner(
1193 	uintptr_t               owner);
1194 
1195 extern void             lck_spinlock_timeout_set_orig_ctid(
1196 	uint32_t                ctid);
1197 
1198 extern lck_spinlock_to_info_t lck_spinlock_timeout_hit(
1199 	void                   *lck,
1200 	uintptr_t               owner);
1201 
1202 #endif /* MACH_KERNEL_PRIVATE */
1203 #if  XNU_KERNEL_PRIVATE
1204 
1205 uintptr_t unslide_for_kdebug(const void* object) __pure2;
1206 
/* Startup descriptor consumed by lck_attr_startup_init() to initialize a
 * statically declared lck_attr_t at boot (emitted by LCK_ATTR_DECLARE). */
1207 struct lck_attr_startup_spec {
1208 	lck_attr_t              *lck_attr;             /* attribute object to initialize */
1209 	uint32_t                lck_attr_set_flags;    /* attribute flags to set during init */
1210 	uint32_t                lck_attr_clear_flags;  /* attribute flags to clear during init */
1211 };
1212 
/* Startup descriptor consumed by lck_spin_startup_init() to initialize a
 * statically declared spinlock at boot (emitted by LCK_SPIN_DECLARE_ATTR). */
1213 struct lck_spin_startup_spec {
1214 	lck_spin_t              *lck;       /* spinlock to initialize */
1215 	lck_grp_t               *lck_grp;   /* lock group the lock is attributed to */
1216 	lck_attr_t              *lck_attr;  /* attributes to initialize the lock with */
1217 };
1218 
/* Startup descriptor consumed by lck_ticket_startup_init() to initialize a
 * statically declared ticket lock at boot (emitted by LCK_TICKET_DECLARE). */
1219 struct lck_ticket_startup_spec {
1220 	lck_ticket_t            *lck;      /* ticket lock to initialize */
1221 	lck_grp_t               *lck_grp;  /* lock group the lock is attributed to */
1222 };
1223 
1224 extern void             lck_attr_startup_init(
1225 	struct lck_attr_startup_spec *spec);
1226 
1227 extern void             lck_spin_startup_init(
1228 	struct lck_spin_startup_spec *spec);
1229 
1230 extern void             lck_ticket_startup_init(
1231 	struct lck_ticket_startup_spec *spec);
1232 
1233 /*
1234  * Auto-initializing locks declarations
1235  * ------------------------------------
1236  *
1237  * Unless you need to configure your locks in very specific ways,
1238  * there is no point creating explicit lock attributes. For most
1239  * static locks, these declaration macros can be used:
1240  *
1241  * - LCK_SPIN_DECLARE for spinlocks,
1242  * - LCK_MTX_DECLARE for mutexes,
1243  *
1244  * For cases when some particular attributes need to be used,
1245  * these come in *_ATTR variants that take a variable declared with
1246  * LCK_ATTR_DECLARE as an argument.
1247  */
/* Declares a read-only-late lck_attr_t plus a startup record so that
 * lck_attr_startup_init() initializes it at STARTUP_RANK_SECOND of LOCKS. */
1248 #define LCK_ATTR_DECLARE(var, set_flags, clear_flags) \
1249 	SECURITY_READ_ONLY_LATE(lck_attr_t) var; \
1250 	static __startup_data struct lck_attr_startup_spec \
1251 	__startup_lck_attr_spec_ ## var = { &var, set_flags, clear_flags }; \
1252 	STARTUP_ARG(LOCKS, STARTUP_RANK_SECOND, lck_attr_startup_init, \
1253 	    &__startup_lck_attr_spec_ ## var)
1254 
/* Declares a spinlock plus a startup record so that lck_spin_startup_init()
 * initializes it at STARTUP_RANK_FOURTH of LOCKS (after attrs/groups exist). */
1255 #define LCK_SPIN_DECLARE_ATTR(var, grp, attr) \
1256 	lck_spin_t var; \
1257 	static __startup_data struct lck_spin_startup_spec \
1258 	__startup_lck_spin_spec_ ## var = { &var, grp, attr }; \
1259 	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_spin_startup_init, \
1260 	    &__startup_lck_spin_spec_ ## var)
1261 
/* Convenience form of LCK_SPIN_DECLARE_ATTR with default (NULL) attributes. */
1262 #define LCK_SPIN_DECLARE(var, grp) \
1263 	LCK_SPIN_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
1264 
/* Declares a ticket lock plus a startup record so that lck_ticket_startup_init()
 * initializes it at STARTUP_RANK_FOURTH of LOCKS. */
1265 #define LCK_TICKET_DECLARE(var, grp) \
1266 	lck_ticket_t var; \
1267 	static __startup_data struct lck_ticket_startup_spec \
1268 	__startup_lck_ticket_spec_ ## var = { &var, grp }; \
1269 	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_ticket_startup_init, \
1270 	    &__startup_lck_ticket_spec_ ## var)
1271 
1272 #endif /* XNU_KERNEL_PRIVATE */
1273 
1274 __END_DECLS
1275 
1276 #endif /* _KERN_LOCKS_H_ */
1277