xref: /xnu-8792.61.2/osfmk/kern/locks.h (revision 42e220869062b56f8d7d0726fd4c88954f87902c)
1 /*
2  * Copyright (c) 2003-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_LOCKS_H_
30 #define _KERN_LOCKS_H_
31 
32 #include <sys/cdefs.h>
33 #include <sys/appleapiopts.h>
34 
35 #include <mach/boolean.h>
36 #include <machine/locks.h>
37 
38 #include <kern/kern_types.h>
39 #include <kern/lock_attr.h>
40 #include <kern/lock_group.h>
41 #include <kern/lock_mtx.h>
42 #include <kern/lock_rw.h>
43 #include <kern/lock_types.h>
44 #ifdef KERNEL_PRIVATE
45 #include <kern/ticket_lock.h>
46 #endif
47 #ifdef  XNU_KERNEL_PRIVATE
48 #include <kern/startup.h>
49 #include <kern/percpu.h>
50 #endif /* XNU_KERNEL_PRIVATE */
51 
52 __BEGIN_DECLS
53 
54 #define decl_lck_spin_data(class, name)     class lck_spin_t name
55 
56 extern lck_spin_t      *lck_spin_alloc_init(
57 	lck_grp_t               *grp,
58 	lck_attr_t              *attr);
59 
60 extern void             lck_spin_init(
61 	lck_spin_t              *lck,
62 	lck_grp_t               *grp,
63 	lck_attr_t              *attr);
64 
65 extern void             lck_spin_lock(
66 	lck_spin_t              *lck);
67 
68 extern void             lck_spin_lock_grp(
69 	lck_spin_t              *lck,
70 	lck_grp_t               *grp);
71 
72 extern void             lck_spin_unlock(
73 	lck_spin_t              *lck);
74 
75 extern void             lck_spin_destroy(
76 	lck_spin_t              *lck,
77 	lck_grp_t               *grp);
78 
79 extern void             lck_spin_free(
80 	lck_spin_t              *lck,
81 	lck_grp_t               *grp);
82 
83 extern wait_result_t    lck_spin_sleep(
84 	lck_spin_t              *lck,
85 	lck_sleep_action_t      lck_sleep_action,
86 	event_t                 event,
87 	wait_interrupt_t        interruptible);
88 
89 extern wait_result_t    lck_spin_sleep_grp(
90 	lck_spin_t              *lck,
91 	lck_sleep_action_t      lck_sleep_action,
92 	event_t                 event,
93 	wait_interrupt_t        interruptible,
94 	lck_grp_t               *grp);
95 
96 extern wait_result_t    lck_spin_sleep_deadline(
97 	lck_spin_t              *lck,
98 	lck_sleep_action_t      lck_sleep_action,
99 	event_t                 event,
100 	wait_interrupt_t        interruptible,
101 	uint64_t                deadline);
102 
103 #ifdef  KERNEL_PRIVATE
104 
105 extern void             lck_spin_lock_nopreempt(
106 	lck_spin_t              *lck);
107 
108 extern void             lck_spin_lock_nopreempt_grp(
109 	lck_spin_t              *lck, lck_grp_t *grp);
110 
111 extern void             lck_spin_unlock_nopreempt(
112 	lck_spin_t              *lck);
113 
114 extern boolean_t        lck_spin_try_lock_grp(
115 	lck_spin_t              *lck,
116 	lck_grp_t               *grp);
117 
118 extern boolean_t        lck_spin_try_lock(
119 	lck_spin_t              *lck);
120 
121 extern boolean_t        lck_spin_try_lock_nopreempt(
122 	lck_spin_t              *lck);
123 
124 extern boolean_t        lck_spin_try_lock_nopreempt_grp(
125 	lck_spin_t              *lck,
126 	lck_grp_t               *grp);
127 
128 /* NOT SAFE: To be used only by kernel debugger to avoid deadlock. */
129 extern boolean_t        kdp_lck_spin_is_acquired(
130 	lck_spin_t              *lck);
131 
132 /*
133  * Name: lck_spin_sleep_with_inheritor
134  *
135  * Description:
136  *   deschedule the current thread and wait on the waitq associated with event
137  *   to be woken up.
138  *
139  *   While waiting, the sched priority of the waiting thread will contribute to
140  *   the push of the event that will be directed to the inheritor specified.
141  *
142  *   An interruptible mode and deadline can be specified to return earlier from
143  *   the wait.
144  *
145  * Args:
146  *   Arg1: lck_spin_t lock used to protect the sleep.
147  *         The lock will be dropped while sleeping and reacquired before
148  *         returning according to the sleep action specified.
149  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
150  *   Arg3: event to wait on.
151  *   Arg4: thread to propagate the event push to.
152  *   Arg5: interruptible flag for wait.
153  *   Arg6: deadline for wait.
154  *
155  * Conditions:
156  *   Lock must be held.
157  *
158  *   Returns with the lock held according to the sleep action specified.
159  *   Lock will be dropped while waiting.
160  *
161  *   The inheritor specified cannot return to user space or exit until another
162  *   inheritor is specified for the event or a wakeup for the event is called.
163  *
164  * Returns: result of the wait.
165  */
166 extern wait_result_t lck_spin_sleep_with_inheritor(
167 	lck_spin_t              *lock,
168 	lck_sleep_action_t      lck_sleep_action,
169 	event_t                 event,
170 	thread_t                inheritor,
171 	wait_interrupt_t        interruptible,
172 	uint64_t                deadline);
173 
174 /*
175  * Name: lck_ticket_sleep_with_inheritor
176  *
177  * Description:
178  *   deschedule the current thread and wait on the waitq associated with event
179  *   to be woken up.
180  *
181  *   While waiting, the sched priority of the waiting thread will contribute to
182  *   the push of the event that will be directed to the inheritor specified.
183  *
184  *   An interruptible mode and deadline can be specified to return earlier from
185  *   the wait.
186  *
187  * Args:
188  *   Arg1: lck_ticket_t lock used to protect the sleep.
189  *         The lock will be dropped while sleeping and reacquired before
190  *         returning according to the sleep action specified.
191  *   Arg2: lck_grp_t associated with the lock.
192  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK.
193  *   Arg4: event to wait on.
194  *   Arg5: thread to propagate the event push to.
195  *   Arg6: interruptible flag for wait.
196  *   Arg7: deadline for wait.
197  *
198  * Conditions:
199  *   Lock must be held.
200  *
201  *   Returns with the lock held according to the sleep action specified.
202  *
203  *   Lock will be dropped while waiting.
204  *
205  *   The inheritor specified cannot return to user space or exit until another
206  *   inheritor is specified for the event or a wakeup for the event is called.
207  *
208  * Returns: result of the wait.
209  */
210 extern wait_result_t lck_ticket_sleep_with_inheritor(
211 	lck_ticket_t            *lock,
212 	lck_grp_t               *grp,
213 	lck_sleep_action_t      lck_sleep_action,
214 	event_t                 event,
215 	thread_t                inheritor,
216 	wait_interrupt_t        interruptible,
217 	uint64_t                deadline);
218 
219 /*
220  * Name: lck_mtx_sleep_with_inheritor
221  *
222  * Description:
223  *   deschedule the current thread and wait on the waitq associated with event
224  *   to be woken up.
225  *
226  *   While waiting, the sched priority of the waiting thread will contribute to
227  *   the push of the event that will be directed to the inheritor specified.
228  *
229  *   An interruptible mode and deadline can be specified to return earlier from
230  *   the wait.
231  *
232  * Args:
233  *   Arg1: lck_mtx_t lock used to protect the sleep.
234  *         The lock will be dropped while sleeping and reacquired before
235  *         returning according to the sleep action specified.
236  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
237  *   Arg3: event to wait on.
238  *   Arg4: thread to propagate the event push to.
239  *   Arg5: interruptible flag for wait.
240  *   Arg6: deadline for wait.
241  *
242  * Conditions:
243  *   Lock must be held.
244  *
245  *   Returns with the lock held according to the sleep action specified.
246  *
247  *   Lock will be dropped while waiting.
248  *
249  *   The inheritor specified cannot return to user space or exit until another
250  *   inheritor is specified for the event or a wakeup for the event is called.
251  *
252  * Returns: result of the wait.
253  */
254 extern wait_result_t lck_mtx_sleep_with_inheritor(
255 	lck_mtx_t               *lock,
256 	lck_sleep_action_t      lck_sleep_action,
257 	event_t                 event,
258 	thread_t                inheritor,
259 	wait_interrupt_t        interruptible,
260 	uint64_t                deadline);
261 
262 /*
263  * Name: lck_rw_sleep_with_inheritor
264  *
265  * Description:
266  *   deschedule the current thread and wait on the waitq associated with event
267  *   to be woken up.
268  *
269  *   While waiting, the sched priority of the waiting thread will contribute to
270  *   the push of the event that will be directed to the inheritor specified.
271  *
272  *   An interruptible mode and deadline can be specified to return earlier from
273  *   the wait.
274  *
275  * Args:
276  *   Arg1: lck_rw_t lock used to protect the sleep.
277  *         The lock will be dropped while sleeping and reacquired before
278  *         returning according to the sleep action specified.
279  *   Arg2: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE.
280  *   Arg3: event to wait on.
281  *   Arg4: thread to propagate the event push to.
282  *   Arg5: interruptible flag for wait.
283  *   Arg6: deadline for wait.
284  *
285  * Conditions:
286  *   Lock must be held.
287  *
288  *   Returns with the lock held according to the sleep action specified.
289  *
290  *   Lock will be dropped while waiting.
291  *
292  *   The inheritor specified cannot return to user space or exit until another
293  *   inheritor is specified for the event or a wakeup for the event is called.
294  *
295  * Returns: result of the wait.
296  */
297 extern wait_result_t lck_rw_sleep_with_inheritor(
298 	lck_rw_t                *lock,
299 	lck_sleep_action_t      lck_sleep_action,
300 	event_t                 event,
301 	thread_t                inheritor,
302 	wait_interrupt_t        interruptible,
303 	uint64_t                deadline);
304 
305 /*
306  * Name: wakeup_one_with_inheritor
307  *
308  * Description:
309  *   Wake up one waiter for event if any.
310  *
311  *   The thread woken up will be the one with the highest sched priority waiting
312  *   on event.
313  *
314  *   The push for the event will be transferred from the last inheritor to the
315  *   woken up thread.
316  *
317  * Args:
318  *   Arg1: event to wake from.
319  *   Arg2: wait result to pass to the woken up thread.
320  *   Arg3: wake action (lck_wake_action_t).
321  *   Arg4: pointer for storing the thread woken up.
321  *
322  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
323  *
324  * Conditions:
325  *   The new woken up inheritor cannot return to user space or exit until
326  *   another inheritor is specified for the event or a new wakeup for the event
327  *   is performed.
328  *
329  *   A reference for the woken thread is acquired.
330  *
331  *   NOTE: this cannot be called from interrupt context.
332  */
333 extern kern_return_t wakeup_one_with_inheritor(
334 	event_t                 event,
335 	wait_result_t           result,
336 	lck_wake_action_t       action,
337 	thread_t                *thread_wokenup);
338 
339 /*
340  * Name: wakeup_all_with_inheritor
341  *
342  * Description: wake up all waiters waiting for event. The old inheritor will lose the push.
343  *
344  * Args:
345  *   Arg1: event to wake from.
346  *   Arg2: wait result to pass to the woken up threads.
347  *
348  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
349  *
350  * Conditions: NOTE: this cannot be called from interrupt context.
351  */
352 extern kern_return_t wakeup_all_with_inheritor(
353 	event_t                 event,
354 	wait_result_t           result);
355 
356 /*
357  * Name: change_sleep_inheritor
358  *
359  * Description:
360  *   Redirect the push of the waiting threads of event to the new inheritor specified.
361  *
362  * Args:
363  *   Arg1: event to redirect the push.
364  *   Arg2: new inheritor for event.
365  *
366  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
367  *
368  * Conditions:
369  *   In case of success, the new inheritor cannot return to user space or exit
370  *   until another inheritor is specified for the event or a wakeup for the
371  *   event is called.
372  *
373  *   NOTE: this cannot be called from interrupt context.
374  */
375 extern kern_return_t change_sleep_inheritor(
376 	event_t                 event,
377 	thread_t                inheritor);
378 
379 
380 #if XNU_KERNEL_PRIVATE
381 
382 /*
383  * Bits layout of cond_swi_var32/cond_swi_var64.
384  * The first SWI_COND_OWNER_BITS are reserved for the owner;
385  * the remaining bits can be used by the caller.
386  */
387 #define SWI_COND_OWNER_BITS     20
388 #define SWI_COND_CALLER_BITS    (32 - SWI_COND_OWNER_BITS)
389 
390 typedef struct cond_swi_var32 {
391 	union {
392 		uint32_t cond32_data;   /* raw 32-bit condition word, checked atomically by cond_sleep_with_inheritor32() */
393 		struct {
394 			uint32_t cond32_owner: SWI_COND_OWNER_BITS,     /* ctid of the thread that inherits the push */
395 			    cond32_caller_bits: SWI_COND_CALLER_BITS;   /* bits free for the caller's own synchronization logic */
396 		};
397 	};
398 } cond_swi_var32_s;
399 
400 typedef struct cond_swi_var64 {
401 	union {
402 		uint64_t cond64_data;   /* raw 64-bit condition word, checked atomically by cond_sleep_with_inheritor64() */
403 		struct {
404 			uint32_t cond64_owner: SWI_COND_OWNER_BITS,     /* ctid of the thread that inherits the push */
405 			    cond64_caller_bits: SWI_COND_CALLER_BITS;   /* bits free for the caller's own synchronization logic */
406 			uint32_t cond64_caller_extra;                   /* additional 32 caller-owned bits (upper word) */
407 		};
408 	};
409 } cond_swi_var64_s;
410 
411 typedef struct cond_swi_var *cond_swi_var_t;
412 
413 /*
414  * Name: cond_sleep_with_inheritor32
415  *
416  * Description: Conditionally sleeps with inheritor, with condition variable of 32bits.
417  *              Allows a thread to conditionally sleep while indicating which thread should
418  *              inherit the priority push associated with the condition.
419  *              The condition should be expressed through a cond_swi_var32_s pointer.
420  *              The condition needs to be populated by the caller with the ctid of the
421  *              thread that should inherit the push. The remaining bits of the condition
422  *              can be used by the caller to implement its own synchronization logic.
423  *              A copy of the condition value observed by the caller when it decided to call
424  *              this function should be provided to prevent races with matching wakeups.
425  *              This function will atomically check the value stored in the condition against
426  *              the expected/observed one provided. If the check doesn't pass the thread will not
427  *              sleep and the function will return.
428  *              The ctid provided in the condition will be used only after a successful
429  *              check.
430  *
431  * Args:
432  *   Arg1: cond_swi_var32_s pointer that stores the condition to check.
433  *   Arg2: cond_swi_var32_s observed value to check for conditionally sleep.
434  *   Arg3: interruptible flag for wait.
435  *   Arg4: deadline for wait.
436  *
437  * Conditions:
438  *   The inheritor specified cannot return to user space or exit until another
439  *   inheritor is specified for the cond or a wakeup for the cond is called.
440  *
441  * Returns: result of the wait.
442  */
443 extern wait_result_t cond_sleep_with_inheritor32(
444 	cond_swi_var_t          cond,
445 	cond_swi_var32_s        expected_cond,
446 	wait_interrupt_t        interruptible,
447 	uint64_t                deadline);
448 
449 /*
450  * Name: cond_sleep_with_inheritor64
451  *
452  * Description: Conditionally sleeps with inheritor, with condition variable of 64bits.
453  *              Allows a thread to conditionally sleep while indicating which thread should
454  *              inherit the priority push associated with the condition.
455  *              The condition should be expressed through a cond_swi_var64_s pointer.
456  *              The condition needs to be populated by the caller with the ctid of the
457  *              thread that should inherit the push. The remaining bits of the condition
458  *              can be used by the caller to implement its own synchronization logic.
459  *              A copy of the condition value observed by the caller when it decided to call
460  *              this function should be provided to prevent races with matching wakeups.
461  *              This function will atomically check the value stored in the condition against
462  *              the expected/observed one provided. If the check doesn't pass the thread will not
463  *              sleep and the function will return.
464  *              The ctid provided in the condition will be used only after a successful
465  *              check.
466  *
467  * Args:
468  *   Arg1: cond_swi_var64_s pointer that stores the condition to check.
469  *   Arg2: cond_swi_var64_s observed value to check for conditionally sleep.
470  *   Arg3: interruptible flag for wait.
471  *   Arg4: deadline for wait.
472  *
473  * Conditions:
474  *   The inheritor specified cannot return to user space or exit until another
475  *   inheritor is specified for the cond or a wakeup for the cond is called.
476  *
477  * Returns: result of the wait.
478  */
479 extern wait_result_t cond_sleep_with_inheritor64(
480 	cond_swi_var_t          cond,
481 	cond_swi_var64_s        expected_cond,
482 	wait_interrupt_t        interruptible,
483 	uint64_t                deadline);
484 
485 /*
486  * Name: cond_sleep_with_inheritor64_mask
487  *
488  * Description: Conditionally sleeps with inheritor, with condition variable of 64bits.
489  *              Allows a thread to conditionally sleep while indicating which thread should
490  *              inherit the priority push associated with the condition.
491  *              The condition should be expressed through a cond_swi_var64_s pointer.
492  *              The condition needs to be populated by the caller with the ctid of the
493  *              thread that should inherit the push. The remaining bits of the condition
494  *              can be used by the caller to implement its own synchronization logic.
495  *              A copy of the condition value observed by the caller when it decided to call
496  *              this function should be provided to prevent races with matching wakeups.
497  *              This function will atomically check the value stored in the condition against
498  *              the expected/observed one provided only for the bits that are set in the mask.
499  *              If the check doesn't pass the thread will not sleep and the function will return.
500  *              The ctid provided in the condition will be used only after a successful
501  *              check.
502  *
503  * Args:
504  *   Arg1: cond_swi_var64_s pointer that stores the condition to check.
505  *   Arg2: cond_swi_var64_s observed value to check for conditionally sleep.
506  *   Arg3: mask to apply to the condition to check.
507  *   Arg4: interruptible flag for wait.
508  *   Arg5: deadline for wait.
509  *
510  * Conditions:
511  *   The inheritor specified cannot return to user space or exit until another
512  *   inheritor is specified for the cond or a wakeup for the cond is called.
513  *
514  * Returns: result of the wait.
515  */
516 extern wait_result_t cond_sleep_with_inheritor64_mask(
517 	cond_swi_var_t          cond,
518 	cond_swi_var64_s        expected_cond,
519 	uint64_t                check_mask,
520 	wait_interrupt_t        interruptible,
521 	uint64_t                deadline);
522 
523 /*
524  * Name: cond_wakeup_one_with_inheritor
525  *
526  * Description: Wake up one waiter waiting on the condition (if any).
527  *              The thread woken up will be the one with the highest sched priority waiting on the condition.
528  *              The push for the condition will be transferred from the last inheritor to the woken up thread.
529  *
530  * Args:
531  *   Arg1: condition to wake from.
532  *   Arg2: wait result to pass to the woken up thread.
533  *   Arg3: wake action (lck_wake_action_t).
534  *   Arg4: pointer for storing the thread woken up.
534  *
535  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
536  *
537  * Conditions:
538  *   The new woken up inheritor cannot return to user space or exit until
539  *   another inheritor is specified for the event or a new wakeup for the event
540  *   is performed.
541  *
542  *   A reference for the woken thread is acquired.
543  *
544  *   NOTE: this cannot be called from interrupt context.
545  */
546 extern kern_return_t cond_wakeup_one_with_inheritor(
547 	cond_swi_var_t          cond,
548 	wait_result_t           result,
549 	lck_wake_action_t       action,
550 	thread_t                *thread_wokenup);
551 
552 /*
553  * Name: cond_wakeup_all_with_inheritor
554  *
555  * Description: Wake up all waiters waiting on the same condition. The old inheritor will lose the push.
556  *
557  * Args:
558  *   Arg1: condition to wake from.
559  *   Arg2: wait result to pass to the woken up threads.
560  *
561  * Returns: KERN_NOT_WAITING if no threads were waiting, KERN_SUCCESS otherwise.
562  *
563  * Conditions: NOTE: this cannot be called from interrupt context.
564  */
565 extern kern_return_t cond_wakeup_all_with_inheritor(
566 	cond_swi_var_t          cond,
567 	wait_result_t           result);
568 
569 /*
570  * gate structure
571  */
572 typedef struct gate {
573 	uintptr_t         gt_data;                // thread holder, interlock bit and waiter bit
574 	struct turnstile *gt_turnstile;           // turnstile, protected by the interlock bit
575 	union {
576 		struct {
577 			uint32_t  gt_refs:16,             // refs using the gate, protected by interlock bit
578 			    gt_alloc:1,                   // gate was allocated with gate_alloc_init
579 			    gt_type:2,                    // type bits for validity
580 			    gt_flags_pad:13;              // unused
581 		};
582 		uint32_t  gt_flags;                   // all of the above bit-fields viewed as one word
583 	};
584 } gate_t;
585 
586 #else /* XNU_KERNEL_PRIVATE */
587 
588 typedef struct gate {
589 	uintptr_t         opaque1;        // mirrors gt_data in the XNU_KERNEL_PRIVATE layout above
590 	uintptr_t         opaque2;        // mirrors gt_turnstile
591 	uint32_t          opaque3;        // mirrors the gt_flags word; keeps sizeof(gate_t) in sync
592 } gate_t;
593 
594 #endif /* XNU_KERNEL_PRIVATE */
595 
596 /*
597  * Possible gate_wait_result_t values.
598  */
599 __options_decl(gate_wait_result_t, unsigned int, {
600 	GATE_HANDOFF      = 0x00,         /* gate was handed off to current thread */
601 	GATE_OPENED       = 0x01,         /* gate was opened */
602 	GATE_TIMED_OUT    = 0x02,         /* wait timed out */
603 	GATE_INTERRUPTED  = 0x03,         /* wait was interrupted */
604 });
605 
606 /*
607  * Gate flags used by gate_assert
608  */
609 __options_decl(gate_assert_flags_t, unsigned int, {
610 	GATE_ASSERT_CLOSED = 0x00,         /* asserts the gate is currently closed */
611 	GATE_ASSERT_OPEN   = 0x01,         /* asserts the gate is currently open */
612 	GATE_ASSERT_HELD   = 0x02,         /* asserts the gate is closed and held by current_thread() */
613 });
614 
615 /*
616  * Gate flags used by gate_handoff
617  */
618 __options_decl(gate_handoff_flags_t, unsigned int, {
619 	GATE_HANDOFF_DEFAULT            = 0x00,        /* a waiter must exist to hand off the gate */
620 	GATE_HANDOFF_OPEN_IF_NO_WAITERS = 0x1,         /* behave like a gate_open() if there are no waiters */
621 });
622 
623 /*
624  * Name: decl_lck_rw_gate_data
625  *
626  * Description: declares a gate variable with specified storage class.
627  *              The gate itself will be stored in this variable and it is the caller's responsibility
628  *              to ensure that this variable's memory is going to be accessible by all threads that will use
629  *              the gate.
630  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
631  *              be used in every thread.
632  *
633  *              The variable needs to be initialized once with lck_rw_gate_init() and destroyed once with
634  *              lck_rw_gate_destroy() when not needed anymore.
635  *
636  *              The gate will be used in conjunction with a lck_rw_t.
637  *
638  * Args:
639  *   Arg1: storage class.
640  *   Arg2: variable name.
641  */
642 #define decl_lck_rw_gate_data(class, name)                              class gate_t name
643 
644 /*
645  * Name: lck_rw_gate_init
646  *
647  * Description: initializes a variable declared with decl_lck_rw_gate_data.
648  *
649  * Args:
650  *   Arg1: lck_rw_t lock used to protect the gate.
651  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
652  */
653 extern void lck_rw_gate_init(lck_rw_t *lock, gate_t *gate);
654 
655 /*
656  * Name: lck_rw_gate_destroy
657  *
658  * Description: destroys a variable previously initialized
659  *              with lck_rw_gate_init().
660  *
661  * Args:
662  *   Arg1: lck_rw_t lock used to protect the gate.
663  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
664  */
665 extern void lck_rw_gate_destroy(lck_rw_t *lock, gate_t *gate);
666 
667 /*
668  * Name: lck_rw_gate_alloc_init
669  *
670  * Description: allocates and initializes a gate_t.
671  *
672  * Args:
673  *   Arg1: lck_rw_t lock used to protect the gate.
674  *
675  * Returns:
676  *         gate_t allocated.
677  */
678 extern gate_t* lck_rw_gate_alloc_init(lck_rw_t *lock);
679 
680 /*
681  * Name: lck_rw_gate_free
682  *
683  * Description: destroys and tries to free a gate previously allocated
684  *              with lck_rw_gate_alloc_init().
685  *              The gate free might be delegated to the last thread returning
686  *              from the gate_wait().
687  *
688  * Args:
689  *   Arg1: lck_rw_t lock used to protect the gate.
690  *   Arg2: pointer to the gate obtained with lck_rw_gate_alloc_init().
691  */
692 extern void lck_rw_gate_free(lck_rw_t *lock, gate_t *gate);
693 
694 /*
695  * Name: lck_rw_gate_try_close
696  *
697  * Description: Tries to close the gate.
698  *              In case of success the current thread will be set as
699  *              the holder of the gate.
700  *
701  * Args:
702  *   Arg1: lck_rw_t lock used to protect the gate.
703  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
704  *
705  * Conditions: Lock must be held. Returns with the lock held.
706  *
707  * Returns:
708  *   KERN_SUCCESS in case the gate was successfully closed. The current thread
709  *   is the new holder of the gate.
710  *
711  *   A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called
712  *   later on to wake up possible waiters on the gate before returning to
713  *   userspace.
714  *
715  *   If the intent is to conditionally probe the gate before waiting, the lock
716  *   must not be dropped between the calls to lck_rw_gate_try_close() and
717  *   lck_rw_gate_wait().
718  *
719  *   KERN_FAILURE in case the gate was already closed.
720  *   Will panic if the current thread was already the holder of the gate.
721  *
722  *   lck_rw_gate_wait() should be called instead if the intent is to
723  *   unconditionally wait on this gate.
724  *
725  *   The calls to lck_rw_gate_try_close() and lck_rw_gate_wait() should
726  *   be done without dropping the lock that is protecting the gate in between.
727  */
728 extern kern_return_t lck_rw_gate_try_close(lck_rw_t *lock, gate_t *gate);
729 
730 /*
731  * Name: lck_rw_gate_close
732  *
733  * Description: Closes the gate. The current thread will be set as
734  *              the holder of the gate. Will panic if the gate is already closed.
735  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
736  *              to wake up possible waiters on the gate before returning to userspace.
737  *
738  * Args:
739  *   Arg1: lck_rw_t lock used to protect the gate.
740  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
741  *
742  * Conditions: Lock must be held. Returns with the lock held.
743  *             The gate must be open.
744  *
745  */
746 extern void lck_rw_gate_close(lck_rw_t *lock, gate_t *gate);
747 
748 
749 /*
750  * Name: lck_rw_gate_open
751  *
752  * Description: Opens the gate and wakes up possible waiters.
753  *
754  * Args:
755  *   Arg1: lck_rw_t lock used to protect the gate.
756  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
757  *
758  * Conditions: Lock must be held. Returns with the lock held.
759  *             The current thread must be the holder of the gate.
760  *
761  */
762 extern void lck_rw_gate_open(lck_rw_t *lock, gate_t *gate);
763 
764 /*
765  * Name: lck_rw_gate_handoff
766  *
767  * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
768  *              priority will be selected as the new holder of the gate, and woken up,
769  *              with the gate remaining in the closed state throughout.
770  *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
771  *              will be returned.
772  *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
773  *              case no waiters were found.
774  *
775  *
776  * Args:
777  *   Arg1: lck_rw_t lock used to protect the gate.
778  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
779  *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
780  *
781  * Conditions: Lock must be held. Returns with the lock held.
782  *             The current thread must be the holder of the gate.
783  *
784  * Returns:
785  *          KERN_SUCCESS in case one of the waiters became the new holder.
786  *          KERN_NOT_WAITING in case there were no waiters.
787  *
788  */
789 extern kern_return_t lck_rw_gate_handoff(lck_rw_t *lock, gate_t *gate, gate_handoff_flags_t flags);
790 
791 /*
792  * Name: lck_rw_gate_steal
793  *
794  * Description: Set the current ownership of the gate. It sets the current thread as the
795  *              new holder of the gate.
796  *              A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on
797  *              to wake up possible waiters on the gate before returning to userspace.
798  *              NOTE: the previous holder should not call lck_rw_gate_open() or lck_rw_gate_handoff()
799  *              anymore.
800  *
801  *
802  * Args:
803  *   Arg1: lck_rw_t lock used to protect the gate.
804  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
805  *
806  * Conditions: Lock must be held. Returns with the lock held.
807  *             The gate must be closed and the current thread must not already be the holder.
808  *
809  */
810 extern void lck_rw_gate_steal(lck_rw_t *lock, gate_t *gate);
811 
812 /*
813  * Name: lck_rw_gate_wait
814  *
815  * Description: Waits for the current thread to become the holder of the gate or for the
816  *              gate to become open. An interruptible mode and deadline can be specified
817  *              to return earlier from the wait.
818  *
819  * Args:
820  *   Arg1: lck_rw_t lock used to protect the gate.
821  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
822  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, LCK_SLEEP_UNLOCK.
823  *   Arg3: interruptible flag for wait.
824  *   Arg4: deadline
825  *
826  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
827  *             Lock will be dropped while waiting.
828  *             The gate must be closed.
829  *
830  * Returns: Reason why the thread was woken up.
831  *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
832  *                         A matching lck_rw_gate_open() or lck_rw_gate_handoff() needs to be called later on.
833  *                         to wake up possible waiters on the gate before returning to userspace.
834  *          GATE_OPENED - the gate was opened by the holder.
835  *          GATE_TIMED_OUT - the thread was woken up by a timeout.
836  *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
837  */
838 extern gate_wait_result_t lck_rw_gate_wait(
839 	lck_rw_t               *lock,
840 	gate_t                 *gate,
841 	lck_sleep_action_t      lck_sleep_action,
842 	wait_interrupt_t        interruptible,
843 	uint64_t                deadline);
844 
845 /*
846  * Name: lck_rw_gate_assert
847  *
848  * Description: asserts that the gate is in the specified state.
849  *
850  * Args:
851  *   Arg1: lck_rw_t lock used to protect the gate.
852  *   Arg2: pointer to the gate data declared with decl_lck_rw_gate_data.
853  *   Arg3: flags to specified assert type.
854  *         GATE_ASSERT_CLOSED - the gate is currently closed
855  *         GATE_ASSERT_OPEN - the gate is currently opened
856  *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
857  */
858 extern void lck_rw_gate_assert(lck_rw_t *lock, gate_t *gate, gate_assert_flags_t flags);
859 
860 /*
861  * Name: decl_lck_mtx_gate_data
862  *
863  * Description: declares a gate variable with specified storage class.
864  *              The gate itself will be stored in this variable and it is the caller's responsibility
865  *              to ensure that this variable's memory is going to be accessible by all threads that will use
866  *              the gate.
867  *              Every gate function will require a pointer to this variable as parameter. The same pointer should
868  *              be used in every thread.
869  *
870  *              The variable needs to be initialized once with lck_mtx_gate_init() and destroyed once with
871  *              lck_mtx_gate_destroy() when not needed anymore.
872  *
873  *              The gate will be used in conjunction with a lck_mtx_t.
874  *
875  * Args:
876  *   Arg1: storage class.
877  *   Arg2: variable name.
878  */
879 #define decl_lck_mtx_gate_data(class, name)                             class gate_t name
880 
881 /*
882  * Name: lck_mtx_gate_init
883  *
884  * Description: initializes a variable declared with decl_lck_mtx_gate_data.
885  *
886  * Args:
887  *   Arg1: lck_mtx_t lock used to protect the gate.
888  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
889  */
890 extern void lck_mtx_gate_init(lck_mtx_t *lock, gate_t *gate);
891 
892 /*
893  * Name: lck_mtx_gate_destroy
894  *
895  * Description: destroys a variable previously initialized
896  *              with lck_mtx_gate_init().
897  *
898  * Args:
899  *   Arg1: lck_mtx_t lock used to protect the gate.
900  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
901  */
902 extern void lck_mtx_gate_destroy(lck_mtx_t *lock, gate_t *gate);
903 
904 /*
905  * Name: lck_mtx_gate_alloc_init
906  *
907  * Description: allocates and initializes a gate_t.
908  *
909  * Args:
910  *   Arg1: lck_mtx_t lock used to protect the gate.
911  *
912  * Returns:
913  *         gate_t allocated.
914  */
915 extern gate_t* lck_mtx_gate_alloc_init(lck_mtx_t *lock);
916 
917 /*
918  * Name: lck_mtx_gate_free
919  *
920  * Description: destroys and tries to free a gate previously allocated
921  *	        with lck_mtx_gate_alloc_init().
922  *              The gate free might be delegated to the last thread returning
923  *              from the gate_wait().
924  *
925  * Args:
926  *   Arg1: lck_mtx_t lock used to protect the gate.
927  *   Arg2: pointer to the gate obtained with lck_mtx_gate_alloc_init().
928  */
929 extern void lck_mtx_gate_free(lck_mtx_t *lock, gate_t *gate);
930 
931 /*
932  * Name: lck_mtx_gate_try_close
933  *
934  * Description: Tries to close the gate.
935  *              In case of success the current thread will be set as
936  *              the holder of the gate.
937  *
938  * Args:
939  *   Arg1: lck_mtx_t lock used to protect the gate.
940  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
941  *
942  * Conditions: Lock must be held. Returns with the lock held.
943  *
944  * Returns:
945  *   KERN_SUCCESS in case the gate was successfully closed. The current thread
946  *   is the new holder of the gate.
947  *
948  *   A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called
949  *   later on to wake up possible waiters on the gate before returning to
950  *   userspace.
951  *
952  *   If the intent is to conditionally probe the gate before waiting, the lock
953  *   must not be dropped between the calls to lck_mtx_gate_try_close() and
954  *   lck_mtx_gate_wait().
955  *
956  *   KERN_FAILURE in case the gate was already closed. Will panic if the current
957  *   thread was already the holder of the gate.
958  *
959  *   lck_mtx_gate_wait() should be called instead if the intent is to
960  *   unconditionally wait on this gate.
961  *
962  *   The calls to lck_mtx_gate_try_close() and lck_mtx_gate_wait() should
963  *   be done without dropping the lock that is protecting the gate in between.
964  */
965 extern kern_return_t lck_mtx_gate_try_close(lck_mtx_t *lock, gate_t *gate);
966 
967 /*
968  * Name: lck_mtx_gate_close
969  *
970  * Description: Closes the gate. The current thread will be set as
971  *              the holder of the gate. Will panic if the gate is already closed.
972  *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
973  *              to wake up possible waiters on the gate before returning to userspace.
974  *
975  * Args:
976  *   Arg1: lck_mtx_t lock used to protect the gate.
977  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
978  *
979  * Conditions: Lock must be held. Returns with the lock held.
980  *             The gate must be open.
981  *
982  */
983 extern void lck_mtx_gate_close(lck_mtx_t *lock, gate_t *gate);
984 
985 /*
986  * Name: lck_mtx_gate_open
987  *
988  * Description: Opens of the gate and wakes up possible waiters.
989  *
990  * Args:
991  *   Arg1: lck_mtx_t lock used to protect the gate.
992  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
993  *
994  * Conditions: Lock must be held. Returns with the lock held.
995  *             The current thread must be the holder of the gate.
996  *
997  */
998 extern void lck_mtx_gate_open(lck_mtx_t *lock, gate_t *gate);
999 
1000 /*
1001  * Name: lck_mtx_gate_handoff
1002  *
1003  * Description: Tries to transfer the ownership of the gate. The waiter with highest sched
1004  *              priority will be selected as the new holder of the gate, and woken up,
1005  *              with the gate remaining in the closed state throughout.
1006  *              If no waiters are present, the gate will be kept closed and KERN_NOT_WAITING
1007  *              will be returned.
1008  *              GATE_HANDOFF_OPEN_IF_NO_WAITERS flag can be used to specify if the gate should be opened in
1009  *              case no waiters were found.
1010  *
1011  *
1012  * Args:
1013  *   Arg1: lck_mtx_t lock used to protect the gate.
1014  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1015  *   Arg3: flags - GATE_HANDOFF_DEFAULT or GATE_HANDOFF_OPEN_IF_NO_WAITERS
1016  *
1017  * Conditions: Lock must be held. Returns with the lock held.
1018  *             The current thread must be the holder of the gate.
1019  *
1020  * Returns:
1021  *          KERN_SUCCESS in case one of the waiters became the new holder.
1022  *          KERN_NOT_WAITING in case there were no waiters.
1023  *
1024  */
1025 extern kern_return_t lck_mtx_gate_handoff(lck_mtx_t *lock, gate_t *gate, gate_handoff_flags_t flags);
1026 
1027 /*
1028  * Name: lck_mtx_gate_steal
1029  *
1030  * Description: Steals the ownership of the gate. It sets the current thread as the
1031  *              new holder of the gate.
1032  *              A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1033  *              to wake up possible waiters on the gate before returning to userspace.
1034  *              NOTE: the previous holder should not call lck_mtx_gate_open() or lck_mtx_gate_handoff()
1035  *              anymore.
1036  *
1037  *
1038  * Args:
1039  *   Arg1: lck_mtx_t lock used to protect the gate.
1040  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1041  *
1042  * Conditions: Lock must be held. Returns with the lock held.
1043  *             The gate must be closed and the current thread must not already be the holder.
1044  *
1045  */
1046 extern void lck_mtx_gate_steal(lck_mtx_t *lock, gate_t *gate);
1047 
1048 /*
1049  * Name: lck_mtx_gate_wait
1050  *
1051  * Description: Waits for the current thread to become the holder of the gate or for the
1052  *              gate to become open. An interruptible mode and deadline can be specified
1053  *              to return earlier from the wait.
1054  *
1055  * Args:
1056  *   Arg1: lck_mtx_t lock used to protect the gate.
1057  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1058  *   Arg3: sleep action. LCK_SLEEP_DEFAULT, LCK_SLEEP_UNLOCK, LCK_SLEEP_SPIN, LCK_SLEEP_SPIN_ALWAYS.
1059  *   Arg3: interruptible flag for wait.
1060  *   Arg4: deadline
1061  *
1062  * Conditions: Lock must be held. Returns with the lock held according to the sleep action specified.
1063  *             Lock will be dropped while waiting.
1064  *             The gate must be closed.
1065  *
1066  * Returns: Reason why the thread was woken up.
1067  *          GATE_HANDOFF - the current thread was handed off the ownership of the gate.
1068  *                         A matching lck_mtx_gate_open() or lck_mtx_gate_handoff() needs to be called later on
1069  *                         to wake up possible waiters on the gate before returning to userspace.
1070  *          GATE_OPENED - the gate was opened by the holder.
1071  *          GATE_TIMED_OUT - the thread was woken up by a timeout.
1072  *          GATE_INTERRUPTED - the thread was interrupted while sleeping.
1073  */
1074 extern gate_wait_result_t lck_mtx_gate_wait(
1075 	lck_mtx_t              *lock,
1076 	gate_t                 *gate,
1077 	lck_sleep_action_t      lck_sleep_action,
1078 	wait_interrupt_t        interruptible,
1079 	uint64_t                deadline);
1080 
1081 /*
1082  * Name: lck_mtx_gate_assert
1083  *
1084  * Description: asserts that the gate is in the specified state.
1085  *
1086  * Args:
1087  *   Arg1: lck_mtx_t lock used to protect the gate.
1088  *   Arg2: pointer to the gate data declared with decl_lck_mtx_gate_data.
1089  *   Arg3: flags to specified assert type.
1090  *         GATE_ASSERT_CLOSED - the gate is currently closed
1091  *         GATE_ASSERT_OPEN - the gate is currently opened
1092  *         GATE_ASSERT_HELD - the gate is currently closed and the current thread is the holder
1093  */
1094 extern void lck_mtx_gate_assert(lck_mtx_t *lock, gate_t *gate, gate_assert_flags_t flags);
1095 
/*
 * Name: lck_spin_assert
 *
 * Description: asserts that the spin lock is in the specified state
 *              (see LCK_ASSERT_OWNED / LCK_ASSERT_NOTOWNED below).
 *              Usually invoked through the LCK_SPIN_ASSERT() wrapper so the
 *              check compiles out on non-MACH_ASSERT kernels.
 *
 * Args:
 *   Arg1: spin lock to check.
 *   Arg2: assert type - LCK_ASSERT_OWNED or LCK_ASSERT_NOTOWNED.
 */
extern void             lck_spin_assert(
	lck_spin_t              *lck,
	unsigned                int    type);
1099 
#if CONFIG_PV_TICKET
/*
 * One-time startup initialization of the lock subsystem when CONFIG_PV_TICKET
 * is enabled. NOTE(review): "PV" presumably stands for paravirtualized ticket
 * lock support — confirm against the implementation.
 */
__startup_func extern void lck_init_pv(void);
#endif
1103 
1104 #endif  /* KERNEL_PRIVATE */
1105 
/*
 * LCK_SPIN_ASSERT() performs a real lck_spin_assert() check only on
 * MACH_ASSERT kernels; it expands to nothing otherwise.
 */
#if MACH_ASSERT
#define LCK_SPIN_ASSERT(lck, type) lck_spin_assert((lck),(type))
#else /* MACH_ASSERT */
#define LCK_SPIN_ASSERT(lck, type)
#endif /* MACH_ASSERT */

/* Same check, but enabled only on DEBUG kernels. */
#if DEBUG
#define LCK_SPIN_ASSERT_DEBUG(lck, type) lck_spin_assert((lck),(type))
#else /* DEBUG */
#define LCK_SPIN_ASSERT_DEBUG(lck, type)
#endif /* DEBUG */

/* Assert types accepted by lck_spin_assert() / LCK_SPIN_ASSERT(). */
#define LCK_ASSERT_OWNED                1
#define LCK_ASSERT_NOTOWNED             2
1120 
1121 #ifdef  MACH_KERNEL_PRIVATE
1122 
/*
 * Diagnostic record describing a spinlock timeout ("to" = timeout), one
 * instance per CPU, plus a global pointer flagging the record currently
 * being reported. Populated via the lck_spinlock_timeout_* helpers below.
 * NOTE(review): exact field semantics (current vs. original owner encoding)
 * are defined by the implementation — confirm before relying on them.
 */
typedef struct lck_spinlock_to_info {
	void                   *lock;               /* the lock that timed out */
#if DEBUG || DEVELOPMENT
	uintptr_t               owner_thread_orig;  /* set via lck_spinlock_timeout_set_orig_owner/ctid */
#endif /* DEBUG || DEVELOPMENT */
	uintptr_t               owner_thread_cur;
	int                     owner_cpu;
	uint32_t                extra;
} *lck_spinlock_to_info_t;

extern volatile lck_spinlock_to_info_t lck_spinlock_timeout_in_progress;
PERCPU_DECL(struct lck_spinlock_to_info, lck_spinlock_to_info);
1135 
/*
 * Per-CPU bookkeeping for ticket lock paravirtualization (CONFIG_PV_TICKET).
 * NOTE(review): field meanings ("ltpi_wt" looks like a wait/ticket value)
 * are not visible from this header — confirm against the implementation.
 */
typedef struct lck_tktlock_pv_info {
	void                   *ltpi_lck;   /* the ticket lock being tracked */
	uint8_t                 ltpi_wt;
} *lck_tktlock_pv_info_t;

PERCPU_DECL(struct lck_tktlock_pv_info, lck_tktlock_pv_info);
1142 
/* Records the original owner (as a raw thread value) in the per-CPU spinlock timeout info. */
extern void             lck_spinlock_timeout_set_orig_owner(
	uintptr_t               owner);

/* Records the original owner (as a compact thread ID) in the per-CPU spinlock timeout info. */
extern void             lck_spinlock_timeout_set_orig_ctid(
	uint32_t                ctid);

/*
 * Called when a spinlock acquisition times out: fills in and returns the
 * per-CPU lck_spinlock_to_info record for the given lock and observed owner.
 */
extern lck_spinlock_to_info_t lck_spinlock_timeout_hit(
	void                   *lck,
	uintptr_t               owner);
1152 
1153 #endif /* MACH_KERNEL_PRIVATE */
1154 #if  XNU_KERNEL_PRIVATE
1155 
/* NOTE(review): name suggests this strips the kernel slide from a pointer so it can be safely emitted in kdebug trace records — confirm in the implementation. */
uintptr_t unslide_for_kdebug(void* object);
1157 
/*
 * Startup specs consumed by the LCK_*_DECLARE macros below: each macro emits
 * one __startup_data spec and registers the matching *_startup_init callback
 * via STARTUP_ARG, so the lock/attr is initialized during the LOCKS startup
 * phase without an explicit init call at the use site.
 */
struct lck_attr_startup_spec {
	lck_attr_t              *lck_attr;
	uint32_t                lck_attr_set_flags;     /* attr flags to set at init */
	uint32_t                lck_attr_clear_flags;   /* attr flags to clear at init */
};

struct lck_spin_startup_spec {
	lck_spin_t              *lck;
	lck_grp_t               *lck_grp;
	lck_attr_t              *lck_attr;
};

struct lck_ticket_startup_spec {
	lck_ticket_t            *lck;
	lck_grp_t               *lck_grp;
};

/* Startup callback: applies a lck_attr_startup_spec. */
extern void             lck_attr_startup_init(
	struct lck_attr_startup_spec *spec);

/* Startup callback: initializes the spinlock described by the spec. */
extern void             lck_spin_startup_init(
	struct lck_spin_startup_spec *spec);

/* Startup callback: initializes the ticket lock described by the spec. */
extern void             lck_ticket_startup_init(
	struct lck_ticket_startup_spec *spec);
1183 
1184 /*
1185  * Auto-initializing locks declarations
1186  * ------------------------------------
1187  *
1188  * Unless you need to configure your locks in very specific ways,
1189  * there is no point creating explicit lock attributes. For most
1190  * static locks, these declaration macros can be used:
1191  *
 * - LCK_SPIN_DECLARE for spinlocks,
 * - LCK_MTX_DECLARE for mutexes,
 * - LCK_TICKET_DECLARE for ticket locks,
1194  *
1195  * For cases when some particular attributes need to be used,
1196  * these come in *_ATTR variants that take a variable declared with
1197  * LCK_ATTR_DECLARE as an argument.
1198  */
/* Declares a lock attribute initialized at boot with the given set/clear flags. */
#define LCK_ATTR_DECLARE(var, set_flags, clear_flags) \
	SECURITY_READ_ONLY_LATE(lck_attr_t) var; \
	static __startup_data struct lck_attr_startup_spec \
	__startup_lck_attr_spec_ ## var = { &var, set_flags, clear_flags }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_SECOND, lck_attr_startup_init, \
	    &__startup_lck_attr_spec_ ## var)

/* Declares a spinlock auto-initialized at boot with the given group and attributes. */
#define LCK_SPIN_DECLARE_ATTR(var, grp, attr) \
	lck_spin_t var; \
	static __startup_data struct lck_spin_startup_spec \
	__startup_lck_spin_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_spin_startup_init, \
	    &__startup_lck_spin_spec_ ## var)

/* Declares a spinlock auto-initialized at boot with default (NULL) attributes. */
#define LCK_SPIN_DECLARE(var, grp) \
	LCK_SPIN_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)

/* Declares a ticket lock auto-initialized at boot with the given group. */
#define LCK_TICKET_DECLARE(var, grp) \
	lck_ticket_t var; \
	static __startup_data struct lck_ticket_startup_spec \
	__startup_lck_ticket_spec_ ## var = { &var, grp }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_ticket_startup_init, \
	    &__startup_lck_ticket_spec_ ## var)
1222 
1223 #endif /* XNU_KERNEL_PRIVATE */
1224 
1225 __END_DECLS
1226 
1227 #endif /* _KERN_LOCKS_H_ */
1228