/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_RW_LOCK_H_
#define _KERN_RW_LOCK_H_

#include <kern/lock_types.h>
#include <kern/lock_group.h>
#include <kern/lock_attr.h>

#ifdef  XNU_KERNEL_PRIVATE
#include <kern/startup.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef  MACH_KERNEL_PRIVATE

typedef union {
	struct {
		uint16_t        shared_count;       /* Number of shared requests granted */
		uint16_t
		    interlock:              1,      /* Interlock */
		    priv_excl:              1,      /* Priority for writers */
		    want_upgrade:           1,      /* Read-to-write upgrade waiting */
		    want_excl:              1,      /* Writer is waiting, or locked for write */
		    r_waiting:              1,      /* Someone is sleeping on lock */
		    w_waiting:              1,      /* Writer is sleeping on lock */
		    can_sleep:              1,      /* Can attempts to lock go to sleep? */
		    _pad2:                  8,      /* padding */
		    tag_valid:              1;      /* Field is actually a tag, not a bitfield */
	};
	uint32_t        data;                       /* Single word version of bitfields and shared count */
} lck_rw_word_t;

typedef struct lck_rw_s {
	uint32_t        lck_rw_unused : 24; /* tsid one day ... */
	uint32_t        lck_rw_type   :  8; /* LCK_TYPE_RW */
	uint32_t        lck_rw_padding;
	lck_rw_word_t   lck_rw;
	uint32_t        lck_rw_owner;       /* ctid_t */
} lck_rw_t;     /* arm: 8  arm64: 16 x86: 16 */

#define lck_rw_shared_count     lck_rw.shared_count
#define lck_rw_interlock        lck_rw.interlock
#define lck_rw_priv_excl        lck_rw.priv_excl
#define lck_rw_want_upgrade     lck_rw.want_upgrade
#define lck_rw_want_excl        lck_rw.want_excl
#define lck_r_waiting           lck_rw.r_waiting
#define lck_w_waiting           lck_rw.w_waiting
#define lck_rw_can_sleep        lck_rw.can_sleep
#define lck_rw_data             lck_rw.data
// tag and data reference the same memory. When the tag_valid bit is set,
// the data word should be treated as a tag instead of a bitfield.
#define lck_rw_tag_valid        lck_rw.tag_valid
#define lck_rw_tag              lck_rw.data

#define LCK_RW_SHARED_READER_OFFSET      0
#define LCK_RW_INTERLOCK_BIT            16
#define LCK_RW_PRIV_EXCL_BIT            17
#define LCK_RW_WANT_UPGRADE_BIT         18
#define LCK_RW_WANT_EXCL_BIT            19
#define LCK_RW_R_WAITING_BIT            20
#define LCK_RW_W_WAITING_BIT            21
#define LCK_RW_CAN_SLEEP_BIT            22
//                                      23-30
#define LCK_RW_TAG_VALID_BIT            31

#define LCK_RW_INTERLOCK                (1U << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_R_WAITING                (1U << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING                (1U << LCK_RW_W_WAITING_BIT)
#define LCK_RW_WANT_UPGRADE             (1U << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL                (1U << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_TAG_VALID                (1U << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_PRIV_EXCL                (1U << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_SHARED_MASK              (0xffff << LCK_RW_SHARED_READER_OFFSET)
#define LCK_RW_SHARED_READER            (0x1 << LCK_RW_SHARED_READER_OFFSET)

#define LCK_RW_TAG_DESTROYED            ((LCK_RW_TAG_VALID | 0xdddddeadu))      /* lock marked as destroyed */
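
/*
 * Illustrative sketch (not part of the original interface): the bit constants
 * above mirror the lck_rw_word_t bitfield layout, so a raw "data" word can be
 * decoded directly, for example:
 *
 *	uint32_t word = lck->lck_rw_data;
 *	if (!(word & LCK_RW_TAG_VALID)) {
 *		uint16_t readers = (word & LCK_RW_SHARED_MASK) >> LCK_RW_SHARED_READER_OFFSET;
 *		bool     writer  = (word & LCK_RW_WANT_EXCL) != 0;
 *	}
 *
 * When the tag bit is set (e.g. LCK_RW_TAG_DESTROYED), the word is a tag and
 * must not be interpreted as a bitfield.
 */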

#elif KERNEL_PRIVATE
typedef struct {
	uintptr_t               opaque[2] __kernel_data_semantics;
} lck_rw_t;
#else /* @KERNEL_PRIVATE */
typedef struct __lck_rw_t__     lck_rw_t;
#endif /* !KERNEL_PRIVATE */

#if DEVELOPMENT || DEBUG
#ifdef XNU_KERNEL_PRIVATE

#define DEBUG_RW                        1
#define LCK_RW_EXPECTED_MAX_NUMBER      3       /* Expected maximum number of rw_locks held concurrently per thread */

#if __LP64__
#define LCK_RW_CALLER_PACKED_BITS   48
#define LCK_RW_CALLER_PACKED_SHIFT   0
#define LCK_RW_CALLER_PACKED_BASE    0
#else
#define LCK_RW_CALLER_PACKED_BITS   32
#define LCK_RW_CALLER_PACKED_SHIFT   0
#define LCK_RW_CALLER_PACKED_BASE    0
#endif

_Static_assert(!VM_PACKING_IS_BASE_RELATIVE(LCK_RW_CALLER_PACKED),
    "Make sure the rwlde_caller_packed pointer packing is based on arithmetic shifts");


struct __attribute__ ((packed)) rw_lock_debug_entry {
	lck_rw_t      *rwlde_lock;                                       // rw_lock held
	int8_t        rwlde_mode_count;                                  // -1 means held in write mode; a positive value is the recursive read count
#if __LP64__
	uintptr_t     rwlde_caller_packed: LCK_RW_CALLER_PACKED_BITS;    // caller that created the entry
#else
	uintptr_t     rwlde_caller_packed;                               // caller that created the entry
#endif
};
typedef struct rw_lock_debug {
	struct rw_lock_debug_entry rwld_locks[LCK_RW_EXPECTED_MAX_NUMBER]; /* rw_lock debug info for currently held locks */
	uint8_t                    rwld_locks_saved : 7,                   /* number of locks saved in rwld_locks */
	    rwld_overflow : 1;                                             /* rwld_locks was full, so the saved info might be incomplete */
	uint32_t                   rwld_locks_acquired;                    /* number of locks acquired */
} rw_lock_debug_t;

_Static_assert(LCK_RW_EXPECTED_MAX_NUMBER <= 127, "LCK_RW_EXPECTED_MAX_NUMBER bigger than rwld_locks_saved");

#endif /* XNU_KERNEL_PRIVATE */
#endif /* DEVELOPMENT || DEBUG */

typedef unsigned int     lck_rw_type_t;

#define LCK_RW_TYPE_SHARED              0x01
#define LCK_RW_TYPE_EXCLUSIVE           0x02

#define decl_lck_rw_data(class, name)   class lck_rw_t name

#if XNU_KERNEL_PRIVATE
/*
 * Auto-initializing rw-lock declarations
 * --------------------------------------
 *
 * Unless you need to configure your locks in very specific ways,
 * there is no point in creating explicit lock attributes. For most
 * static locks, this declaration macro can be used:
 *
 * - LCK_RW_DECLARE.
 *
 * For cases where particular attributes are needed,
 * LCK_RW_DECLARE_ATTR takes a variable declared with
 * LCK_ATTR_DECLARE as an argument.
 * (A usage sketch follows the macro definitions below.)
 */

struct lck_rw_startup_spec {
	lck_rw_t                *lck;
	lck_grp_t               *lck_grp;
	lck_attr_t              *lck_attr;
};

extern void             lck_rw_startup_init(
	struct lck_rw_startup_spec *spec);

#define LCK_RW_DECLARE_ATTR(var, grp, attr) \
	lck_rw_t var; \
	static __startup_data struct lck_rw_startup_spec \
	__startup_lck_rw_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_rw_startup_init, \
	    &__startup_lck_rw_spec_ ## var)

#define LCK_RW_DECLARE(var, grp) \
	LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
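
/*
 * Usage sketch (illustrative, not part of the original header): for most
 * statically allocated locks the declaration macros above are all that is
 * needed; the lock is initialized automatically during the LOCKS startup
 * phase. LCK_GRP_DECLARE from <kern/lock_group.h> is assumed here for the
 * group declaration.
 *
 *	LCK_GRP_DECLARE(my_subsystem_lck_grp, "my_subsystem");
 *	LCK_RW_DECLARE(my_subsystem_rw_lock, &my_subsystem_lck_grp);
 *
 *	static void
 *	my_subsystem_read_config(void)
 *	{
 *		lck_rw_lock_shared(&my_subsystem_rw_lock);
 *		// ... read shared configuration state ...
 *		lck_rw_unlock_shared(&my_subsystem_rw_lock);
 *	}
 */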

#if MACH_ASSERT
#define LCK_RW_ASSERT(lck, type) do { \
	        if (__improbable(!(lck_opts_get() & LCK_OPTION_DISABLE_RW_DEBUG))) { \
	                lck_rw_assert((lck),(type)); \
	        } \
	} while (0)
#else /* MACH_ASSERT */
#define LCK_RW_ASSERT(lck, type)
#endif /* MACH_ASSERT */

#endif /* XNU_KERNEL_PRIVATE */


/*!
 * @function lck_rw_alloc_init
 *
 * @abstract
 * Allocates and initializes a lck_rw_t.
 *
 * @discussion
 * The function can block. See lck_rw_init() for initialization details.
 *
 * @param grp           lock group to associate with the lock.
 * @param attr          lock attribute to initialize the lock.
 *
 * @returns             NULL or the allocated lock
 */
extern lck_rw_t         *lck_rw_alloc_init(
	lck_grp_t               *grp,
	lck_attr_t              *attr);

/*!
 * @function lck_rw_init
 *
 * @abstract
 * Initializes a lck_rw_t.
 *
 * @discussion
 * Usage statistics for the lock will be added to the lock group provided.
 *
 * The lock attribute can be LCK_ATTR_NULL or an attribute allocated with
 * lck_attr_alloc_init(). So far, however, none of the attribute settings are supported.
 *
 * @param lck           lock to initialize.
 * @param grp           lock group to associate with the lock.
 * @param attr          lock attribute to initialize the lock.
 */
extern void             lck_rw_init(
	lck_rw_t                *lck,
	lck_grp_t               *grp,
	lck_attr_t              *attr);
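
/*
 * Usage sketch (illustrative): embedding a rw_lock in a dynamically allocated
 * object. The group "my_grp" is assumed to have been declared elsewhere, e.g.
 * with LCK_GRP_DECLARE.
 *
 *	struct my_object {
 *		lck_rw_t        mo_lock;
 *		// ... fields protected by mo_lock ...
 *	};
 *
 *	static void
 *	my_object_setup(struct my_object *obj)
 *	{
 *		lck_rw_init(&obj->mo_lock, &my_grp, LCK_ATTR_NULL);
 *	}
 *
 *	static void
 *	my_object_teardown(struct my_object *obj)
 *	{
 *		// the lock must not be held by any thread at this point
 *		lck_rw_destroy(&obj->mo_lock, &my_grp);
 *	}
 */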

/*!
 * @function lck_rw_free
 *
 * @abstract
 * Frees a rw_lock previously allocated with lck_rw_alloc_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to free.
 * @param grp           lock group the lock was associated with.
 */
extern void             lck_rw_free(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

/*!
 * @function lck_rw_destroy
 *
 * @abstract
 * Destroys a rw_lock previously initialized with lck_rw_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to destroy.
 * @param grp           lock group the lock was associated with.
 */
extern void             lck_rw_destroy(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

/*!
 * @function lck_rw_lock
 *
 * @abstract
 * Locks a rw_lock with the specified type.
 *
 * @discussion
 * See lck_rw_lock_shared() or lck_rw_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 */
extern void             lck_rw_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_try_lock
 *
 * @abstract
 * Tries to lock a rw_lock with the specified type.
 *
 * @discussion
 * This function will return instead of waiting/blocking in case the lock is already held.
 * See lck_rw_try_lock_shared() or lck_rw_try_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 *
 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
 */
extern boolean_t        lck_rw_try_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);
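
/*
 * Usage sketch (illustrative): attempting to take the lock without blocking,
 * and falling back to other work when it is contended.
 *
 *	if (lck_rw_try_lock(&my_lock, LCK_RW_TYPE_EXCLUSIVE)) {
 *		// ... update the protected state ...
 *		lck_rw_unlock(&my_lock, LCK_RW_TYPE_EXCLUSIVE);
 *	} else {
 *		// lock is currently held; retry later or take a slow path
 *	}
 */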

/*!
 * @function lck_rw_unlock
 *
 * @abstract
 * Unlocks a rw_lock previously locked with the specified type.
 *
 * @discussion
 * The lock must be unlocked by the same thread that locked it.
 * The lock and unlock types have to match, unless an upgrade/downgrade was performed while
 * holding the lock.
 *
 * @param lck           rw_lock to unlock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 */
extern void             lck_rw_unlock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_lock_shared
 *
 * @abstract
 * Locks a rw_lock in shared mode.
 *
 * @discussion
 * This function can block.
 * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time
 * can acquire it in exclusive mode.
 * If the lock is held in shared mode and there are no writers waiting, a reader will be able to acquire
 * the lock without waiting.
 * If the lock is held in shared mode and there is at least one writer waiting, a reader will wait
 * for all the writers to make progress.
 * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported.
 *
 * @param lck           rw_lock to lock.
 */
extern void             lck_rw_lock_shared(
	lck_rw_t                *lck);
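
/*
 * Usage sketch (illustrative): a typical reader section. Multiple threads may
 * execute this concurrently; the lock must be released before returning to
 * userspace.
 *
 *	lck_rw_lock_shared(&my_lock);
 *	// ... read, but do not modify, the protected state ...
 *	lck_rw_unlock_shared(&my_lock);
 */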


#if MACH_KERNEL_PRIVATE
/*!
 * @function lck_rw_lock_shared_b
 *
 * @abstract
 * Locks a rw_lock in shared mode. Returns early if the lock can't be acquired
 * and the specified block returns true.
 *
 * @discussion
 * Identical to lck_rw_lock_shared() but can return early if the lock can't be
 * acquired and the specified block returns true. The block is called
 * repeatedly when waiting to acquire the lock.
 * Should only be called when the lock cannot sleep (i.e. when
 * lock->lck_rw_can_sleep is false).
 *
 * @param lock           rw_lock to lock.
 * @param lock_pause     block invoked while waiting to acquire lock
 *
 * @returns              Returns TRUE if the lock is successfully taken,
 *                       FALSE if the block returns true and the lock has
 *                       not been acquired.
 */
extern boolean_t
    lck_rw_lock_shared_b(
	lck_rw_t        * lock,
	bool            (^lock_pause)(void));

/*!
 * @function lck_rw_lock_exclusive_b
 *
 * @abstract
 * Locks a rw_lock in exclusive mode. Returns early if the lock can't be acquired
 * and the specified block returns true.
 *
 * @discussion
 * Identical to lck_rw_lock_exclusive() but can return early if the lock can't be
 * acquired and the specified block returns true. The block is called
 * repeatedly when waiting to acquire the lock.
 * Should only be called when the lock cannot sleep (i.e. when
 * lock->lck_rw_can_sleep is false).
 *
 * @param lock           rw_lock to lock.
 * @param lock_pause     block invoked while waiting to acquire lock
 *
 * @returns              Returns TRUE if the lock is successfully taken,
 *                       FALSE if the block returns true and the lock has
 *                       not been acquired.
 */
extern boolean_t
    lck_rw_lock_exclusive_b(
	lck_rw_t        * lock,
	bool            (^lock_pause)(void));
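
/*
 * Usage sketch (illustrative): using the pause block to abandon the wait when
 * some external condition becomes true. "should_abort" is a hypothetical
 * predicate supplied by the caller.
 *
 *	if (lck_rw_lock_shared_b(&my_lock, ^bool (void) {
 *		return should_abort();
 *	})) {
 *		// ... lock acquired: read the protected state ...
 *		lck_rw_unlock_shared(&my_lock);
 *	} else {
 *		// the pause block returned true before the lock could be taken
 *	}
 */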
#endif /* MACH_KERNEL_PRIVATE */

/*!
 * @function lck_rw_lock_shared_to_exclusive
 *
 * @abstract
 * Upgrades a rw_lock held in shared mode to exclusive.
 *
 * @discussion
 * This function can block.
 * Only one reader at a time can upgrade to exclusive mode. If the upgrade fails, the function
 * returns with the lock not held.
 * The caller needs to hold the lock in shared mode to upgrade it.
 *
 * @param lck           rw_lock already held in shared mode to upgrade.
 *
 * @returns TRUE if the lock was upgraded, FALSE if it was not possible.
 *          If the function was not able to upgrade the lock, the lock will be dropped
 *          by the function.
 */
extern boolean_t        lck_rw_lock_shared_to_exclusive(
	lck_rw_t                *lck);
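
/*
 * Usage sketch (illustrative): read first, then upgrade to write only when a
 * modification turns out to be needed. If the upgrade fails the lock is
 * dropped, so it must be re-acquired (here in exclusive mode) before retrying.
 * "needs_update" is a hypothetical predicate over the protected state.
 *
 *	lck_rw_lock_shared(&my_lock);
 *	if (needs_update()) {
 *		if (!lck_rw_lock_shared_to_exclusive(&my_lock)) {
 *			// upgrade failed and the lock was dropped;
 *			// state may have changed, re-validate after re-locking
 *			lck_rw_lock_exclusive(&my_lock);
 *		}
 *		// ... modify the protected state ...
 *		lck_rw_unlock_exclusive(&my_lock);
 *	} else {
 *		lck_rw_unlock_shared(&my_lock);
 *	}
 */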

/*!
 * @function lck_rw_unlock_shared
 *
 * @abstract
 * Unlocks a rw_lock previously locked in shared mode.
 *
 * @discussion
 * The same thread that locked the lock needs to unlock it.
 *
 * @param lck           rw_lock held in shared mode to unlock.
 */
extern void             lck_rw_unlock_shared(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive
 *
 * @abstract
 * Locks a rw_lock in exclusive mode.
 *
 * @discussion
 * This function can block.
 * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time
 * can acquire it in exclusive mode.
 * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported.
 *
 * @param lck           rw_lock to lock.
 */
extern void             lck_rw_lock_exclusive(
	lck_rw_t                *lck);
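
/*
 * Usage sketch (illustrative): a typical writer section. Only one thread at a
 * time can hold the lock exclusively, and no readers are admitted while it is
 * held.
 *
 *	lck_rw_lock_exclusive(&my_lock);
 *	// ... modify the protected state ...
 *	lck_rw_unlock_exclusive(&my_lock);
 */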

/*!
 * @function lck_rw_lock_exclusive_to_shared
 *
 * @abstract
 * Downgrades a rw_lock held in exclusive mode to shared.
 *
 * @discussion
 * The caller needs to hold the lock in exclusive mode to be able to downgrade it.
 *
 * @param lck           rw_lock already held in exclusive mode to downgrade.
 */
extern void             lck_rw_lock_exclusive_to_shared(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_unlock_exclusive
 *
 * @abstract
 * Unlocks a rw_lock previously locked in exclusive mode.
 *
 * @discussion
 * The same thread that locked the lock needs to unlock it.
 *
 * @param lck           rw_lock held in exclusive mode to unlock.
 */
extern void             lck_rw_unlock_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_sleep
 *
 * @abstract
 * Assert_wait on an event while holding the rw_lock.
 *
 * @discussion
 * The flags determine how to re-acquire the lock upon wake-up
 * (LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
 * and whether the priority needs to be kept boosted until the lock is
 * re-acquired (LCK_SLEEP_PROMOTED_PRI).
 *
 * @param lck                   rw_lock to use to synchronize the assert_wait.
 * @param lck_sleep_action      flags.
 * @param event                 event to assert_wait on.
 * @param interruptible         wait type.
 */
extern wait_result_t    lck_rw_sleep(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible);
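
/*
 * Usage sketch (illustrative): the classic wait loop, dropping the lock while
 * sleeping on an event and re-acquiring it (here in exclusive mode) on wakeup.
 * "my_condition" is hypothetical, and some other thread is assumed to set it
 * and issue the corresponding wakeup on the same event.
 *
 *	lck_rw_lock_exclusive(&my_lock);
 *	while (!my_condition) {
 *		(void) lck_rw_sleep(&my_lock, LCK_SLEEP_EXCLUSIVE,
 *		    (event_t)&my_condition, THREAD_UNINT);
 *	}
 *	// ... condition holds and the lock is held exclusive ...
 *	lck_rw_unlock_exclusive(&my_lock);
 */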

/*!
 * @function lck_rw_sleep_deadline
 *
 * @abstract
 * Assert_wait_deadline on an event while holding the rw_lock.
 *
 * @discussion
 * The flags determine how to re-acquire the lock upon wake-up
 * (LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
 * and whether the priority needs to be kept boosted until the lock is
 * re-acquired (LCK_SLEEP_PROMOTED_PRI).
 *
 * @param lck                   rw_lock to use to synchronize the assert_wait.
 * @param lck_sleep_action      flags.
 * @param event                 event to assert_wait on.
 * @param interruptible         wait type.
 * @param deadline              maximum time after which the thread is woken up.
 */
extern wait_result_t    lck_rw_sleep_deadline(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	uint64_t                deadline);

#ifdef  XNU_KERNEL_PRIVATE

/*!
 * @function kdp_lck_rw_lock_is_acquired_exclusive
 *
 * @abstract
 * Checks if a rw_lock is held exclusively.
 *
 * @discussion
 * NOT SAFE: to be used only by the kernel debugger to avoid deadlock.
 *
 * @param lck   lock to check
 *
 * @returns TRUE if the lock is held exclusively
 */
extern boolean_t        kdp_lck_rw_lock_is_acquired_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive_check_contended
 *
 * @abstract
 * Locks a rw_lock in exclusive mode.
 *
 * @discussion
 * This routine IS EXPERIMENTAL.
 * It's only used for the vm object lock, and use for other subsystems is UNSUPPORTED.
 * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's contention.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns Returns TRUE if the thread spun or blocked while attempting to acquire the lock, FALSE
 *          otherwise.
 */
extern bool             lck_rw_lock_exclusive_check_contended(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_yield_shared
 *
 * @abstract
 * Yields a rw_lock held in shared mode.
 *
 * @discussion
 * This function can block.
 * Yields the lock in case there are writers waiting.
 * The yield will unlock, block, and re-lock the lock in shared mode.
 *
 * @param lck           rw_lock already held in shared mode to yield.
 * @param force_yield   if set to true the lock will always be yielded, irrespective of its status
 *
 * @returns TRUE if the lock was yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_yield_shared(
	lck_rw_t                *lck,
	boolean_t               force_yield);

/*!
 * @function lck_rw_lock_would_yield_shared
 *
 * @abstract
 * Checks whether a rw_lock currently held in shared mode would be yielded.
 *
 * @discussion
 * This function can be used when lck_rw_lock_yield_shared would be
 * inappropriate due to the need to perform additional housekeeping
 * prior to any yield, or when the caller may wish to prematurely terminate
 * an operation rather than resume it after regaining the lock.
 *
 * @param lck           rw_lock already held in shared mode to test for possible yield.
 *
 * @returns TRUE if the lock would be yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_would_yield_shared(
	lck_rw_t                *lck);
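
/*
 * Usage sketch (illustrative): yielding a long-held shared lock so waiting
 * writers can make progress, with the would-yield check used to do
 * housekeeping before the lock is actually dropped. "first_object" and
 * "next_object" are hypothetical iteration helpers.
 *
 *	lck_rw_lock_shared(&my_lock);
 *	for (obj = first_object(); obj != NULL; obj = next_object(obj)) {
 *		if (lck_rw_lock_would_yield_shared(&my_lock)) {
 *			// ... save iteration state that could change across a yield ...
 *			(void) lck_rw_lock_yield_shared(&my_lock, FALSE);
 *			// ... the lock may have been dropped: re-validate state ...
 *		}
 *		// ... examine obj ...
 *	}
 *	lck_rw_unlock_shared(&my_lock);
 */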


__enum_decl(lck_rw_yield_t, uint32_t, {
	LCK_RW_YIELD_WRITERS_ONLY,
	LCK_RW_YIELD_ANY_WAITER,
	LCK_RW_YIELD_ALWAYS,
});

/*!
 * @function lck_rw_lock_yield_exclusive
 *
 * @abstract
 * Yields a rw_lock held in exclusive mode.
 *
 * @discussion
 * This function can block.
 * Yields the lock in case there are waiters, according to the specified mode.
 * The yield will unlock, block, and re-lock the lock in exclusive mode.
 *
 * @param lck           rw_lock already held in exclusive mode to yield.
 * @param mode          when to yield.
 *
 * @returns TRUE if the lock was yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_yield_exclusive(
	lck_rw_t                *lck,
	lck_rw_yield_t          mode);

/*!
 * @function lck_rw_lock_would_yield_exclusive
 *
 * @abstract
 * Checks whether a rw_lock currently held in exclusive mode would be yielded.
 *
 * @discussion
 * This function can be used when lck_rw_lock_yield_exclusive would be
 * inappropriate due to the need to perform additional housekeeping
 * prior to any yield, or when the caller may wish to prematurely terminate
 * an operation rather than resume it after regaining the lock.
 *
 * @param lck           rw_lock already held in exclusive mode to test for possible yield.
 * @param mode          conditions for a possible yield.
 *
 * @returns TRUE if the lock would be yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_would_yield_exclusive(
	lck_rw_t                *lck,
	lck_rw_yield_t          mode);

#endif /* XNU_KERNEL_PRIVATE */

#if MACH_KERNEL_PRIVATE

/*!
 * @function lck_rw_lock_count_inc
 *
 * @abstract
 * Increments the number of rw_locks held by the (current) thread.
 */
extern void lck_rw_lock_count_inc(
	thread_t                thread,
	const void             *lock);

/*!
 * @function lck_rw_lock_count_dec
 *
 * @abstract
 * Decrements the number of rw_locks held by the (current) thread.
 */
extern void lck_rw_lock_count_dec(
	thread_t                thread,
	const void             *lock);

/*!
 * @function lck_rw_set_promotion_locked
 *
 * @abstract
 * Callout from context switch if the thread goes
 * off core with a positive rwlock_count.
 *
 * @discussion
 * Called at splsched with the thread locked.
 *
 * @param thread        thread to promote.
 */
extern void             lck_rw_set_promotion_locked(
	thread_t                thread);

#endif /* MACH_KERNEL_PRIVATE */

#ifdef  KERNEL_PRIVATE

/*!
 * @function lck_rw_try_lock_shared
 *
 * @abstract
 * Tries to lock a rw_lock in read mode.
 *
 * @discussion
 * This function will return, and not block, in case the lock is already held.
 * See lck_rw_lock_shared for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
 */
extern boolean_t        lck_rw_try_lock_shared(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_try_lock_exclusive
 *
 * @abstract
 * Tries to lock a rw_lock in write mode.
 *
 * @discussion
 * This function will return, and not block, in case the lock is already held.
 * See lck_rw_lock_exclusive for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
 */
extern boolean_t        lck_rw_try_lock_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_done
 *
 * @abstract
 * Force unlocks a rw_lock without consistency checks.
 *
 * @discussion
 * Do not use unless you are sure you can avoid the consistency checks.
 *
 * @param lck           rw_lock to unlock.
 *
 * @returns the mode (shared or exclusive) in which the lock was held.
 */
extern lck_rw_type_t    lck_rw_done(
	lck_rw_t                *lck);
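
/*
 * Usage sketch (illustrative): lck_rw_done() releases the lock whichever way
 * it is currently held and reports which mode was released, which is useful
 * when the hold mode may have changed (e.g. after an upgrade or downgrade).
 *
 *	lck_rw_type_t held = lck_rw_done(&my_lock);
 *	if (held == LCK_RW_TYPE_EXCLUSIVE) {
 *		// the lock had been held (or upgraded) for writing
 *	}
 */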

#define LCK_RW_ASSERT_SHARED    0x01
#define LCK_RW_ASSERT_EXCLUSIVE 0x02
#define LCK_RW_ASSERT_HELD      0x03
#define LCK_RW_ASSERT_NOTHELD   0x04

/*!
 * @function lck_rw_assert
 *
 * @abstract
 * Asserts the rw_lock is held.
 *
 * @discussion
 * Read-write locks do not have a concept of ownership when held in shared mode,
 * so this function merely asserts that someone is holding the lock, not necessarily the caller.
 * However, if rw_lock debugging is on, a best-effort mechanism to track the owners is in place, and
 * this function can be more accurate.
 * Type can be LCK_RW_ASSERT_SHARED, LCK_RW_ASSERT_EXCLUSIVE, LCK_RW_ASSERT_HELD, or
 * LCK_RW_ASSERT_NOTHELD.
 *
 * @param lck   rw_lock to check.
 * @param type  assert type
 */
extern void             lck_rw_assert(
	lck_rw_t                *lck,
	unsigned int            type);
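
/*
 * Usage sketch (illustrative): documenting and enforcing a locking contract at
 * the top of a function that requires its caller to hold the lock. "struct
 * my_object" and its embedded lock are hypothetical.
 *
 *	static void
 *	my_object_update_locked(struct my_object *obj)
 *	{
 *		lck_rw_assert(&obj->mo_lock, LCK_RW_ASSERT_EXCLUSIVE);
 *		// ... safe to modify obj's protected fields ...
 *	}
 */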

#endif /* KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_RW_LOCK_H_ */