/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_RW_LOCK_H_
#define _KERN_RW_LOCK_H_

#include <kern/lock_types.h>
#include <kern/lock_group.h>
#include <kern/lock_attr.h>

#ifdef  XNU_KERNEL_PRIVATE
#include <kern/startup.h>
#endif /* XNU_KERNEL_PRIVATE */

__BEGIN_DECLS

#ifdef  MACH_KERNEL_PRIVATE

typedef union {
	struct {
		uint16_t        shared_count;       /* Number of granted shared requests */
		uint16_t
		    interlock:              1,      /* Interlock */
		    priv_excl:              1,      /* priority for Writer */
		    want_upgrade:           1,      /* Read-to-write upgrade waiting */
		    want_excl:              1,      /* Writer is waiting, or locked for write */
		    r_waiting:              1,      /* Someone is sleeping on lock */
		    w_waiting:              1,      /* Writer is sleeping on lock */
		    can_sleep:              1,      /* Can attempts to lock go to sleep? */
		    _pad2:                  8,      /* padding */
		    tag_valid:              1;      /* Field is actually a tag, not a bitfield */
	};
	uint32_t        data;                       /* Single word version of bitfields and shared count */
} lck_rw_word_t;

typedef struct lck_rw_s {
	uint32_t        lck_rw_unused : 24; /* tsid one day ... */
	uint32_t        lck_rw_type   :  8; /* LCK_TYPE_RW */
	uint32_t        lck_rw_padding;
	lck_rw_word_t   lck_rw;
	uint32_t        lck_rw_owner;       /* ctid_t */
} lck_rw_t;     /* arm: 8  arm64: 16 x86: 16 */

#define lck_rw_shared_count     lck_rw.shared_count
#define lck_rw_interlock        lck_rw.interlock
#define lck_rw_priv_excl        lck_rw.priv_excl
#define lck_rw_want_upgrade     lck_rw.want_upgrade
#define lck_rw_want_excl        lck_rw.want_excl
#define lck_r_waiting           lck_rw.r_waiting
#define lck_w_waiting           lck_rw.w_waiting
#define lck_rw_can_sleep        lck_rw.can_sleep
#define lck_rw_data             lck_rw.data
// tag and data reference the same memory. When the tag_valid bit is set,
// the data word should be treated as a tag instead of a bitfield.
#define lck_rw_tag_valid        lck_rw.tag_valid
#define lck_rw_tag              lck_rw.data

#define LCK_RW_SHARED_READER_OFFSET      0
#define LCK_RW_INTERLOCK_BIT            16
#define LCK_RW_PRIV_EXCL_BIT            17
#define LCK_RW_WANT_UPGRADE_BIT         18
#define LCK_RW_WANT_EXCL_BIT            19
#define LCK_RW_R_WAITING_BIT            20
#define LCK_RW_W_WAITING_BIT            21
#define LCK_RW_CAN_SLEEP_BIT            22
//                                      23-30
#define LCK_RW_TAG_VALID_BIT            31

#define LCK_RW_INTERLOCK                (1U << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_R_WAITING                (1U << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING                (1U << LCK_RW_W_WAITING_BIT)
#define LCK_RW_WANT_UPGRADE             (1U << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL                (1U << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_TAG_VALID                (1U << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_PRIV_EXCL                (1U << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_SHARED_MASK              (0xffff << LCK_RW_SHARED_READER_OFFSET)
#define LCK_RW_SHARED_READER            (0x1 << LCK_RW_SHARED_READER_OFFSET)

#define LCK_RW_TAG_DESTROYED            ((LCK_RW_TAG_VALID | 0xdddddeadu))      /* lock marked as Destroyed */
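
/*
 * Illustrative sketch (the lock name `lck` is an assumption, not a name
 * from this header): how the word-sized view relates to the bitfield view.
 *
 *	uint32_t word      = lck->lck_rw_data;
 *	uint16_t readers   = (uint16_t)(word & LCK_RW_SHARED_MASK);
 *	bool     writer    = (word & LCK_RW_WANT_EXCL) != 0;
 *	bool     destroyed = lck->lck_rw_tag_valid &&
 *	    (lck->lck_rw_tag == LCK_RW_TAG_DESTROYED);
 */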

#elif KERNEL_PRIVATE
typedef struct {
	uintptr_t               opaque[2] __kernel_data_semantics;
} lck_rw_t;
#else /* !KERNEL_PRIVATE */
typedef struct __lck_rw_t__     lck_rw_t;
#endif /* !KERNEL_PRIVATE */

#if DEVELOPMENT || DEBUG
#ifdef XNU_KERNEL_PRIVATE

#define DEBUG_RW                        1
#define LCK_RW_EXPECTED_MAX_NUMBER      3       /* Expected number of concurrently held rw_locks per thread */

#if __LP64__
#define LCK_RW_CALLER_PACKED_BITS   48
#define LCK_RW_CALLER_PACKED_SHIFT   0
#define LCK_RW_CALLER_PACKED_BASE    0
#else
#define LCK_RW_CALLER_PACKED_BITS   32
#define LCK_RW_CALLER_PACKED_SHIFT   0
#define LCK_RW_CALLER_PACKED_BASE    0
#endif

_Static_assert(!VM_PACKING_IS_BASE_RELATIVE(LCK_RW_CALLER_PACKED),
    "Make sure the rwlde_caller_packed pointer packing is based on arithmetic shifts");


struct __attribute__ ((packed)) rw_lock_debug_entry {
	lck_rw_t      *rwlde_lock;                                       // rw_lock held
	int8_t        rwlde_mode_count;                                  // -1 means held in write mode; a positive value is the recursive read count
#if __LP64__
	uintptr_t     rwlde_caller_packed: LCK_RW_CALLER_PACKED_BITS;    // caller that created the entry
#else
	uintptr_t     rwlde_caller_packed;                               // caller that created the entry
#endif
};
typedef struct rw_lock_debug {
	struct rw_lock_debug_entry rwld_locks[LCK_RW_EXPECTED_MAX_NUMBER]; /* rw_lock debug info of currently held locks */
	uint8_t                    rwld_locks_saved : 7,                   /* number of locks saved in rwld_locks */
	    rwld_overflow : 1;                                             /* lock_entry was full, so it might be inaccurate */
	uint32_t                   rwld_locks_acquired;                    /* number of locks acquired */
} rw_lock_debug_t;

_Static_assert(LCK_RW_EXPECTED_MAX_NUMBER <= 127, "LCK_RW_EXPECTED_MAX_NUMBER bigger than rwld_locks_saved");

#endif /* XNU_KERNEL_PRIVATE */
#endif /* DEVELOPMENT || DEBUG */

typedef unsigned int     lck_rw_type_t;

#define LCK_RW_TYPE_SHARED              0x01
#define LCK_RW_TYPE_EXCLUSIVE           0x02

#define decl_lck_rw_data(class, name)   class lck_rw_t name

#if XNU_KERNEL_PRIVATE
/*
 * Auto-initializing rw-lock declarations
 * --------------------------------------
 *
 * Unless you need to configure your locks in very specific ways,
 * there is no point creating explicit lock attributes. For most
 * static locks, this declaration macro can be used:
 *
 * - LCK_RW_DECLARE.
 *
 * For cases when some particular attributes need to be used,
 * LCK_RW_DECLARE_ATTR takes a variable declared with
 * LCK_ATTR_DECLARE as an argument.
 */

struct lck_rw_startup_spec {
	lck_rw_t                *lck;
	lck_grp_t               *lck_grp;
	lck_attr_t              *lck_attr;
};

extern void             lck_rw_startup_init(
	struct lck_rw_startup_spec *spec);

#define LCK_RW_DECLARE_ATTR(var, grp, attr) \
	lck_rw_t var; \
	static __startup_data struct lck_rw_startup_spec \
	__startup_lck_rw_spec_ ## var = { &var, grp, attr }; \
	STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_rw_startup_init, \
	    &__startup_lck_rw_spec_ ## var)

#define LCK_RW_DECLARE(var, grp) \
	LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
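
/*
 * Usage sketch for the declaration macros; the group and lock names
 * (`my_grp`, `my_lock`) are hypothetical:
 *
 *	static LCK_GRP_DECLARE(my_grp, "my-subsystem");
 *	static LCK_RW_DECLARE(my_lock, &my_grp);
 *
 * The lock is then initialized automatically during the LOCKS startup
 * phase, with no explicit lck_rw_init() call.
 */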

#if DEBUG_RW
STATIC_IF_KEY_DECLARE_TRUE(lck_rw_assert);

#define lck_rw_assert_enabled()    improbable_static_if(lck_rw_assert)

extern void lck_rw_assert_init(
	const char             *args,
	uint64_t                kf_ovrd);

#define LCK_RW_ASSERT(lck, type)  do { \
	if (lck_rw_assert_enabled()) { \
	        lck_rw_assert(lck, type); \
	} \
} while (0)
#else /* DEBUG_RW */
#define LCK_RW_ASSERT(lck, type)
#define lck_rw_assert_enabled()    0
#endif /* DEBUG_RW */

#endif /* XNU_KERNEL_PRIVATE */


/*!
 * @function lck_rw_alloc_init
 *
 * @abstract
 * Allocates and initializes a lck_rw_t.
 *
 * @discussion
 * The function can block. See lck_rw_init() for initialization details.
 *
 * @param grp           lock group to associate with the lock.
 * @param attr          lock attribute to initialize the lock.
 *
 * @returns             NULL or the allocated lock
 */
extern lck_rw_t         *lck_rw_alloc_init(
	lck_grp_t               *grp,
	lck_attr_t              *attr);

/*!
 * @function lck_rw_init
 *
 * @abstract
 * Initializes a lck_rw_t.
 *
 * @discussion
 * Usage statistics for the lock will be attributed to the provided lock group.
 *
 * The lock attribute can be LCK_ATTR_NULL, or an attribute can be allocated with
 * lck_attr_alloc_init. Note, however, that none of the attribute settings are
 * currently supported.
 *
 * @param lck           lock to initialize.
 * @param grp           lock group to associate with the lock.
 * @param attr          lock attribute to initialize the lock.
 */
extern void             lck_rw_init(
	lck_rw_t                *lck,
	lck_grp_t               *grp,
	lck_attr_t              *attr);

/*!
 * @function lck_rw_free
 *
 * @abstract
 * Frees a rw_lock previously allocated with lck_rw_alloc_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to free.
 */
extern void             lck_rw_free(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

/*!
 * @function lck_rw_destroy
 *
 * @abstract
 * Destroys a rw_lock previously initialized with lck_rw_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to destroy.
 */
extern void             lck_rw_destroy(
	lck_rw_t                *lck,
	lck_grp_t               *grp);

/*!
 * @function lck_rw_lock
 *
 * @abstract
 * Locks a rw_lock with the specified type.
 *
 * @discussion
 * See lck_rw_lock_shared() or lck_rw_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 */
extern void             lck_rw_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_try_lock
 *
 * @abstract
 * Tries to lock a rw_lock with the specified type.
 *
 * @discussion
 * This function returns instead of waiting/blocking if the lock is already held.
 * See lck_rw_try_lock_shared() or lck_rw_try_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 *
 * @returns TRUE if the lock is successfully acquired, FALSE if it was already held.
 */
extern boolean_t        lck_rw_try_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_unlock
 *
 * @abstract
 * Unlocks a rw_lock previously locked with lck_rw_type.
 *
 * @discussion
 * The lock must be unlocked by the same thread that locked it.
 * The lock and unlock types have to match, unless an upgrade/downgrade was
 * performed while holding the lock.
 *
 * @param lck           rw_lock to unlock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 */
extern void             lck_rw_unlock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);

/*!
 * @function lck_rw_lock_shared
 *
 * @abstract
 * Locks a rw_lock in shared mode.
 *
 * @discussion
 * This function can block.
 * Multiple threads can acquire the lock in shared mode at the same time, but only
 * one thread at a time can acquire it in exclusive mode.
 * If the lock is held in shared mode and there are no writers waiting, a reader
 * will be able to acquire the lock without waiting.
 * If the lock is held in shared mode and there is at least one writer waiting,
 * a reader will wait for all the writers to make progress.
 * NOTE: the thread cannot return to userspace while the lock is held. Recursive
 * locking is not supported.
 *
 * @param lck           rw_lock to lock.
 */
extern void             lck_rw_lock_shared(
	lck_rw_t                *lck);
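
/*
 * Usage sketch: a minimal reader-side critical section, assuming a
 * hypothetical lock `my_lock` declared with LCK_RW_DECLARE:
 *
 *	lck_rw_lock_shared(&my_lock);
 *	// read, but do not modify, the protected state
 *	lck_rw_unlock_shared(&my_lock);
 */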

#if MACH_KERNEL_PRIVATE
/*!
 * @function lck_rw_lock_shared_b
 *
 * @abstract
 * Locks a rw_lock in shared mode. Returns early if the lock can't be acquired
 * and the specified block returns true.
 *
 * @discussion
 * Identical to lck_rw_lock_shared() but can return early if the lock can't be
 * acquired and the specified block returns true. The block is called
 * repeatedly when waiting to acquire the lock.
 * Should only be called when the lock cannot sleep (i.e. when
 * lock->lck_rw_can_sleep is false).
 *
 * @param lock           rw_lock to lock.
 * @param lock_pause     block invoked while waiting to acquire lock
 *
 * @returns              Returns TRUE if the lock is successfully taken,
 *                       FALSE if the block returns true and the lock has
 *                       not been acquired.
 */
extern boolean_t
    lck_rw_lock_shared_b(
	lck_rw_t        * lock,
	bool            (^lock_pause)(void));

/*!
 * @function lck_rw_lock_exclusive_b
 *
 * @abstract
 * Locks a rw_lock in exclusive mode. Returns early if the lock can't be acquired
 * and the specified block returns true.
 *
 * @discussion
 * Identical to lck_rw_lock_exclusive() but can return early if the lock can't be
 * acquired and the specified block returns true. The block is called
 * repeatedly when waiting to acquire the lock.
 * Should only be called when the lock cannot sleep (i.e. when
 * lock->lck_rw_can_sleep is false).
 *
 * @param lock           rw_lock to lock.
 * @param lock_pause     block invoked while waiting to acquire lock
 *
 * @returns              Returns TRUE if the lock is successfully taken,
 *                       FALSE if the block returns true and the lock has
 *                       not been acquired.
 */
extern boolean_t
    lck_rw_lock_exclusive_b(
	lck_rw_t        * lock,
	bool            (^lock_pause)(void));
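
/*
 * Usage sketch for the _b variants, assuming a hypothetical non-sleepable
 * lock `dev_lock` and an atomic bool `aborted`. The block is evaluated
 * repeatedly while spinning; returning true abandons the attempt.
 *
 *	if (lck_rw_lock_shared_b(&dev_lock, ^{
 *		return os_atomic_load(&aborted, relaxed);
 *	})) {
 *		// lock acquired: do the work, then lck_rw_unlock_shared()
 *	} else {
 *		// gave up: the block returned true before acquisition
 *	}
 */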
#endif /* MACH_KERNEL_PRIVATE */

/*!
 * @function lck_rw_lock_shared_to_exclusive
 *
 * @abstract
 * Upgrades a rw_lock held in shared mode to exclusive.
 *
 * @discussion
 * This function can block.
 * Only one reader at a time can upgrade to exclusive mode. If the upgrade fails,
 * the function returns with the lock not held.
 * The caller needs to hold the lock in shared mode to upgrade it.
 *
 * @param lck           rw_lock already held in shared mode to upgrade.
 *
 * @returns TRUE if the lock was upgraded, FALSE if it was not possible.
 *          If the function was not able to upgrade the lock, the lock will be
 *          dropped by the function.
 */
extern boolean_t        lck_rw_lock_shared_to_exclusive(
	lck_rw_t                *lck);
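
/*
 * Usage sketch for the upgrade path: on failure the lock is dropped, so the
 * caller must re-take it and re-validate. `obj_lock` and the revalidation
 * step are hypothetical.
 *
 *	lck_rw_lock_shared(&obj_lock);
 *	// ... decide the object must be modified ...
 *	if (!lck_rw_lock_shared_to_exclusive(&obj_lock)) {
 *		// lock was dropped: re-acquire and re-check the state
 *		lck_rw_lock_exclusive(&obj_lock);
 *	}
 *	// ... modify, then lck_rw_unlock_exclusive(&obj_lock);
 */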

/*!
 * @function lck_rw_unlock_shared
 *
 * @abstract
 * Unlocks a rw_lock previously locked in shared mode.
 *
 * @discussion
 * The same thread that locked the lock needs to unlock it.
 *
 * @param lck           rw_lock held in shared mode to unlock.
 */
extern void             lck_rw_unlock_shared(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive
 *
 * @abstract
 * Locks a rw_lock in exclusive mode.
 *
 * @discussion
 * This function can block.
 * Multiple threads can acquire the lock in shared mode at the same time, but only
 * one thread at a time can acquire it in exclusive mode.
 * NOTE: the thread cannot return to userspace while the lock is held. Recursive
 * locking is not supported.
 *
 * @param lck           rw_lock to lock.
 */
extern void             lck_rw_lock_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive_to_shared
 *
 * @abstract
 * Downgrades a rw_lock held in exclusive mode to shared.
 *
 * @discussion
 * The caller needs to hold the lock in exclusive mode to be able to downgrade it.
 *
 * @param lck           rw_lock already held in exclusive mode to downgrade.
 */
extern void             lck_rw_lock_exclusive_to_shared(
	lck_rw_t                *lck);
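
/*
 * Usage sketch for a downgrade: publish an update under exclusive mode,
 * then keep reading under shared mode without a lock-free window.
 * `obj_lock` is hypothetical.
 *
 *	lck_rw_lock_exclusive(&obj_lock);
 *	// ... mutate the protected state ...
 *	lck_rw_lock_exclusive_to_shared(&obj_lock);
 *	// ... continue reading; other readers may now enter ...
 *	lck_rw_unlock_shared(&obj_lock);
 */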

/*!
 * @function lck_rw_unlock_exclusive
 *
 * @abstract
 * Unlocks a rw_lock previously locked in exclusive mode.
 *
 * @discussion
 * The same thread that locked the lock needs to unlock it.
 *
 * @param lck           rw_lock held in exclusive mode to unlock.
 */
extern void             lck_rw_unlock_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_sleep
 *
 * @abstract
 * Assert_wait on an event while holding the rw_lock.
 *
 * @discussion
 * The flags determine how the lock is re-acquired upon wakeup
 * (LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
 * and whether the priority should be kept boosted until the lock is
 * re-acquired (LCK_SLEEP_PROMOTED_PRI).
 *
 * @param lck                   rw_lock to use to synchronize the assert_wait.
 * @param lck_sleep_action      flags.
 * @param event                 event to assert_wait on.
 * @param interruptible         wait type.
 */
extern wait_result_t    lck_rw_sleep(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible);
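
/*
 * Usage sketch: the classic wait loop. The queue, event, and lock names
 * are hypothetical; LCK_SLEEP_DEFAULT re-acquires the lock in the same
 * mode it was held before sleeping.
 *
 *	lck_rw_lock_exclusive(&q_lock);
 *	while (queue_is_empty(&q)) {
 *		wait_result_t wr = lck_rw_sleep(&q_lock, LCK_SLEEP_DEFAULT,
 *		    (event_t)&q, THREAD_UNINT);
 *		if (wr != THREAD_AWAKENED) {
 *			break;
 *		}
 *	}
 *	// ... consume from q ...
 *	lck_rw_unlock_exclusive(&q_lock);
 */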

/*!
 * @function lck_rw_sleep_deadline
 *
 * @abstract
 * Assert_wait_deadline on an event while holding the rw_lock.
 *
 * @discussion
 * The flags determine how the lock is re-acquired upon wakeup
 * (LCK_SLEEP_SHARED, LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
 * and whether the priority should be kept boosted until the lock is
 * re-acquired (LCK_SLEEP_PROMOTED_PRI).
 *
 * @param lck                   rw_lock to use to synchronize the assert_wait.
 * @param lck_sleep_action      flags.
 * @param event                 event to assert_wait on.
 * @param interruptible         wait type.
 * @param deadline              absolute time at which the wait times out.
 */
extern wait_result_t    lck_rw_sleep_deadline(
	lck_rw_t                *lck,
	lck_sleep_action_t      lck_sleep_action,
	event_t                 event,
	wait_interrupt_t        interruptible,
	uint64_t                deadline);
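
/*
 * Usage sketch with a timeout, assuming the standard clock helpers; the
 * queue and lock are hypothetical, and the deadline is in absolute time.
 *
 *	uint64_t deadline;
 *	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
 *	wait_result_t wr = lck_rw_sleep_deadline(&q_lock, LCK_SLEEP_DEFAULT,
 *	    (event_t)&q, THREAD_UNINT, deadline);
 *	if (wr == THREAD_TIMED_OUT) {
 *		// deadline passed before a wakeup arrived
 *	}
 */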

#ifdef  XNU_KERNEL_PRIVATE

/*!
 * @function kdp_lck_rw_lock_is_acquired_exclusive
 *
 * @abstract
 * Checks if a rw_lock is held exclusively.
 *
 * @discussion
 * NOT SAFE: To be used only by the kernel debugger to avoid deadlock.
 *
 * @param lck   lock to check
 *
 * @returns TRUE if the lock is held exclusively
 */
extern boolean_t        kdp_lck_rw_lock_is_acquired_exclusive(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_exclusive_check_contended
 *
 * @abstract
 * Locks a rw_lock in exclusive mode.
 *
 * @discussion
 * This routine IS EXPERIMENTAL.
 * It's only used for the vm object lock, and use for other subsystems is UNSUPPORTED.
 * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's contention.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns Returns TRUE if the thread spun or blocked while attempting to acquire the lock, FALSE
 *          otherwise.
 */
extern bool             lck_rw_lock_exclusive_check_contended(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_lock_yield_shared
 *
 * @abstract
 * Yields a rw_lock held in shared mode.
 *
 * @discussion
 * This function can block.
 * Yields the lock in case there are writers waiting.
 * The yield will unlock, block, and re-lock the lock in shared mode.
 *
 * @param lck           rw_lock already held in shared mode to yield.
 * @param force_yield   if set to true it will always yield irrespective of the lock status
 *
 * @returns TRUE if the lock was yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_yield_shared(
	lck_rw_t                *lck,
	boolean_t               force_yield);
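
/*
 * Usage sketch: periodically yielding a shared hold during a long scan so
 * waiting writers can make progress. The iteration state is hypothetical.
 *
 *	lck_rw_lock_shared(&map_lock);
 *	for (entry = first; entry != NULL; entry = entry->next) {
 *		// yield (unlock, block, re-lock) only if a writer is waiting
 *		if (lck_rw_lock_yield_shared(&map_lock, FALSE)) {
 *			// lock was re-acquired: cached state may be stale
 *		}
 *	}
 *	lck_rw_unlock_shared(&map_lock);
 */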

/*!
 * @function lck_rw_lock_would_yield_shared
 *
 * @abstract
 * Checks whether a rw_lock currently held in shared mode would be yielded.
 *
 * @discussion
 * This function can be used when lck_rw_lock_yield_shared would be
 * inappropriate due to the need to perform additional housekeeping
 * prior to any yield, or when the caller may wish to prematurely terminate
 * an operation rather than resume it after regaining the lock.
 *
 * @param lck           rw_lock already held in shared mode to test for possible yield.
 *
 * @returns TRUE if the lock would be yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_would_yield_shared(
	lck_rw_t                *lck);
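
/*
 * Usage sketch: checking for a needed yield first, so housekeeping can be
 * done (or the operation abandoned) before the lock is dropped. The
 * helper functions and cursor are hypothetical.
 *
 *	if (lck_rw_lock_would_yield_shared(&map_lock)) {
 *		save_iteration_state(&cursor);      // housekeeping first
 *		lck_rw_lock_yield_shared(&map_lock, TRUE);
 *		restore_iteration_state(&cursor);
 *	}
 */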

__enum_decl(lck_rw_yield_t, uint32_t, {
	LCK_RW_YIELD_WRITERS_ONLY,
	LCK_RW_YIELD_ANY_WAITER,
	LCK_RW_YIELD_ALWAYS,
});

/*!
 * @function lck_rw_lock_yield_exclusive
 *
 * @abstract
 * Yields a rw_lock held in exclusive mode.
 *
 * @discussion
 * This function can block.
 * Yields the lock in case there are waiters matching the requested mode.
 * The yield will unlock, block, and re-lock the lock in exclusive mode.
 *
 * @param lck           rw_lock already held in exclusive mode to yield.
 * @param mode          when to yield.
 *
 * @returns TRUE if the lock was yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_yield_exclusive(
	lck_rw_t                *lck,
	lck_rw_yield_t          mode);

/*!
 * @function lck_rw_lock_would_yield_exclusive
 *
 * @abstract
 * Checks whether a rw_lock currently held in exclusive mode would be yielded.
 *
 * @discussion
 * This function can be used when lck_rw_lock_yield_exclusive would be
 * inappropriate due to the need to perform additional housekeeping
 * prior to any yield, or when the caller may wish to prematurely terminate
 * an operation rather than resume it after regaining the lock.
 *
 * @param lck           rw_lock already held in exclusive mode to test for possible yield.
 * @param mode          conditions for a possible yield
 *
 * @returns TRUE if the lock would be yielded, FALSE otherwise
 */
extern bool             lck_rw_lock_would_yield_exclusive(
	lck_rw_t                *lck,
	lck_rw_yield_t          mode);

#endif /* XNU_KERNEL_PRIVATE */

#if MACH_KERNEL_PRIVATE

/*!
 * @function lck_rw_lock_count_inc
 *
 * @abstract
 * Increments the number of rw_locks held by the (current) thread.
 */
extern void lck_rw_lock_count_inc(
	thread_t                thread,
	const void             *lock);

/*!
 * @function lck_rw_lock_count_dec
 *
 * @abstract
 * Decrements the number of rw_locks held by the (current) thread.
 */
extern void lck_rw_lock_count_dec(
	thread_t                thread,
	const void             *lock);

/*!
 * @function lck_rw_set_promotion_locked
 *
 * @abstract
 * Callout from context switch if the thread goes
 * off core with a positive rwlock_count.
 *
 * @discussion
 * Called at splsched with the thread locked.
 *
 * @param thread        thread to promote.
 */
extern void             lck_rw_set_promotion_locked(
	thread_t                thread);

#endif /* MACH_KERNEL_PRIVATE */

#ifdef  KERNEL_PRIVATE

/*!
 * @function lck_rw_try_lock_shared
 *
 * @abstract
 * Tries to lock a rw_lock in shared (read) mode.
 *
 * @discussion
 * This function returns instead of blocking if the lock is already held.
 * See lck_rw_lock_shared for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns TRUE if the lock is successfully acquired, FALSE if it was already held.
 */
extern boolean_t        lck_rw_try_lock_shared(
	lck_rw_t                *lck);

/*!
 * @function lck_rw_try_lock_exclusive
 *
 * @abstract
 * Tries to lock a rw_lock in exclusive (write) mode.
 *
 * @discussion
 * This function returns instead of blocking if the lock is already held.
 * See lck_rw_lock_exclusive for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns TRUE if the lock is successfully acquired, FALSE if it was already held.
 */
extern boolean_t        lck_rw_try_lock_exclusive(
	lck_rw_t                *lck);
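
/*
 * Usage sketch: opportunistic locking with a blocking fallback.
 * `stats_lock` and `update_stats` are hypothetical.
 *
 *	if (!lck_rw_try_lock_exclusive(&stats_lock)) {
 *		// contended: fall back to a blocking acquisition
 *		lck_rw_lock_exclusive(&stats_lock);
 *	}
 *	update_stats();
 *	lck_rw_unlock_exclusive(&stats_lock);
 */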

/*!
 * @function lck_rw_done
 *
 * @abstract
 * Force unlocks a rw_lock without consistency checks.
 *
 * @discussion
 * Do not use unless sure you can avoid consistency checks.
 *
 * @param lck           rw_lock to unlock.
 *
 * @returns the mode (shared or exclusive) in which the lock was held.
 */
extern lck_rw_type_t    lck_rw_done(
	lck_rw_t                *lck);
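
/*
 * Usage sketch: lck_rw_done() pairs naturally with lck_rw_lock() when the
 * hold mode is chosen at runtime, since it returns the mode it released.
 * `cache_lock` and `want_write` are hypothetical.
 *
 *	lck_rw_type_t type = want_write ? LCK_RW_TYPE_EXCLUSIVE
 *	                                : LCK_RW_TYPE_SHARED;
 *	lck_rw_lock(&cache_lock, type);
 *	// ... critical section valid for either mode ...
 *	lck_rw_type_t released = lck_rw_done(&cache_lock);
 *	assert(released == type);
 */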

#define LCK_RW_ASSERT_SHARED    0x01
#define LCK_RW_ASSERT_EXCLUSIVE 0x02
#define LCK_RW_ASSERT_HELD      0x03
#define LCK_RW_ASSERT_NOTHELD   0x04

/*!
 * @function lck_rw_assert
 *
 * @abstract
 * Asserts the rw_lock is held.
 *
 * @discussion
 * Read-write locks do not have a concept of ownership when held in shared mode,
 * so this function merely asserts that someone is holding the lock, not
 * necessarily the caller. However, if rw_lock_debug is on, a best-effort
 * mechanism to track the owners is in place, and this function can be more
 * accurate.
 * Type can be LCK_RW_ASSERT_SHARED, LCK_RW_ASSERT_EXCLUSIVE, LCK_RW_ASSERT_HELD,
 * or LCK_RW_ASSERT_NOTHELD.
 *
 * @param lck   rw_lock to check.
 * @param type  assert type
 */
extern void             lck_rw_assert(
	lck_rw_t                *lck,
	unsigned int            type);
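
/*
 * Usage sketch: documenting a locking contract at function entry. The
 * function and lock names are hypothetical.
 *
 *	static void
 *	cache_update_locked(void)
 *	{
 *		// caller must hold cache_lock exclusively
 *		lck_rw_assert(&cache_lock, LCK_RW_ASSERT_EXCLUSIVE);
 *		// ... mutate the protected state ...
 *	}
 */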

#endif /* KERNEL_PRIVATE */

__END_DECLS

#endif /* _KERN_RW_LOCK_H_ */