xref: /xnu-8020.101.4/osfmk/kern/lock_rw.h (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_RW_LOCK_H_
30 #define _KERN_RW_LOCK_H_
31 
32 #include <kern/lock_types.h>
33 #include <kern/lock_group.h>
34 #include <kern/lock_attr.h>
35 
36 #ifdef  XNU_KERNEL_PRIVATE
37 #include <kern/startup.h>
38 #endif /* XNU_KERNEL_PRIVATE */
39 
40 __BEGIN_DECLS
41 
42 #ifdef  MACH_KERNEL_PRIVATE
43 
44 typedef union {
45 	struct {
46 		uint16_t        shared_count;       /* Number of shared (read) holds currently granted */
47 		uint16_t
48 		    interlock:              1,      /* Interlock */
49 		    priv_excl:              1,      /* priority for Writer */
50 		    want_upgrade:           1,      /* Read-to-write upgrade waiting */
51 		    want_excl:              1,      /* Writer is waiting, or locked for write */
52 		    r_waiting:              1,      /* Someone is sleeping on lock */
53 		    w_waiting:              1,      /* Writer is sleeping on lock */
54 		    can_sleep:              1,      /* Can attempts to lock go to sleep? */
55 		    _pad2:                  8,      /* padding (bits 23-30, unused) */
56 		    tag_valid:              1;      /* Field is actually a tag, not a bitfield */
57 #if __arm64__
58 		uint32_t        _pad4;
59 #endif
60 	};
61 	struct {
62 		uint32_t        data;               /* Single word version of bitfields and shared count */
63 #if __arm64__
64 		uint32_t        lck_rw_pad4;
65 #endif
66 	};
67 } lck_rw_word_t;
68 
69 typedef struct {
70 	lck_rw_word_t   word;
71 	thread_t        lck_rw_owner __kernel_data_semantics;   /* NOTE(review): presumably the exclusive holder; not meaningful for shared holds — confirm in lock_rw.c */
72 } lck_rw_t;     /* arm: 8  arm64: 16 x86: 16 */
73 
74 #define lck_rw_shared_count     word.shared_count
75 #define lck_rw_interlock        word.interlock
76 #define lck_rw_priv_excl        word.priv_excl
77 #define lck_rw_want_upgrade     word.want_upgrade
78 #define lck_rw_want_excl        word.want_excl
79 #define lck_r_waiting           word.r_waiting
80 #define lck_w_waiting           word.w_waiting
81 #define lck_rw_can_sleep        word.can_sleep
82 #define lck_rw_data             word.data
83 // tag and data reference the same memory. When the tag_valid bit is set,
84 // the data word should be treated as a tag instead of a bitfield.
85 #define lck_rw_tag_valid        word.tag_valid
86 #define lck_rw_tag              word.data
87 
// Bit positions within lck_rw_data; these must mirror the lck_rw_word_t bitfield layout above.
88 #define LCK_RW_SHARED_READER_OFFSET      0
89 #define LCK_RW_INTERLOCK_BIT            16
90 #define LCK_RW_PRIV_EXCL_BIT            17
91 #define LCK_RW_WANT_UPGRADE_BIT         18
92 #define LCK_RW_WANT_EXCL_BIT            19
93 #define LCK_RW_R_WAITING_BIT            20
94 #define LCK_RW_W_WAITING_BIT            21
95 #define LCK_RW_CAN_SLEEP_BIT            22
96 //                                      23-30 unused (_pad2)
97 #define LCK_RW_TAG_VALID_BIT            31
98 
99 #define LCK_RW_INTERLOCK                (1U << LCK_RW_INTERLOCK_BIT)
100 #define LCK_RW_R_WAITING                (1U << LCK_RW_R_WAITING_BIT)
101 #define LCK_RW_W_WAITING                (1U << LCK_RW_W_WAITING_BIT)
102 #define LCK_RW_WANT_UPGRADE             (1U << LCK_RW_WANT_UPGRADE_BIT)
103 #define LCK_RW_WANT_EXCL                (1U << LCK_RW_WANT_EXCL_BIT)
104 #define LCK_RW_TAG_VALID                (1U << LCK_RW_TAG_VALID_BIT)
105 #define LCK_RW_PRIV_EXCL                (1U << LCK_RW_PRIV_EXCL_BIT)
106 #define LCK_RW_SHARED_MASK              (0xffff << LCK_RW_SHARED_READER_OFFSET)
107 #define LCK_RW_SHARED_READER            (0x1 << LCK_RW_SHARED_READER_OFFSET)
108 
109 #define LCK_RW_TAG_DESTROYED            ((LCK_RW_TAG_VALID | 0xdddddeadu))      /* lock marked as Destroyed */
110 
111 #else /* MACH_KERNEL_PRIVATE */
112 
113 #ifdef  KERNEL_PRIVATE
114 // TODO does this need pragma pack(1)?
115 typedef struct {
116 	uintptr_t       opaque[2] __kernel_data_semantics;  /* opaque storage; sized to mirror the MACH_KERNEL_PRIVATE layout (see size note above) */
117 } lck_rw_t;
118 #else /* KERNEL_PRIVATE */
119 typedef struct __lck_rw_t__     lck_rw_t;
120 #endif /* KERNEL_PRIVATE */
121 #endif /* MACH_KERNEL_PRIVATE */
122 
123 #if DEVELOPMENT || DEBUG
124 #ifdef XNU_KERNEL_PRIVATE
125 
126 #define DEBUG_RW                        1
127 #define LCK_RW_EXPECTED_MAX_NUMBER      3       /* Expected max number of rw_locks held concurrently per thread */
128 
// Parameters for packing the caller's return address into rwlde_caller_packed below.
129 #if __LP64__
130 #define LCK_RW_CALLER_PACKED_BITS   48
131 #define LCK_RW_CALLER_PACKED_SHIFT   0
132 #define LCK_RW_CALLER_PACKED_BASE    0
133 #else
134 #define LCK_RW_CALLER_PACKED_BITS   32
135 #define LCK_RW_CALLER_PACKED_SHIFT   0
136 #define LCK_RW_CALLER_PACKED_BASE    0
137 #endif
138 
139 _Static_assert(!VM_PACKING_IS_BASE_RELATIVE(LCK_RW_CALLER_PACKED),
140     "Make sure the rwlde_caller_packed pointer packing is based on arithmetic shifts");
141 
142 
// Debug record for one currently-held rw_lock (DEBUG_RW builds only).
143 struct __attribute__ ((packed)) rw_lock_debug_entry {
144 	lck_rw_t      *rwlde_lock;                                       // rw_lock held
145 	int8_t        rwlde_mode_count;                                  // -1 is held in write mode, positive value is the recursive read count
146 #if __LP64__
147 	uintptr_t     rwlde_caller_packed: LCK_RW_CALLER_PACKED_BITS;    // caller that created the entry
148 #else
149 	uintptr_t     rwlde_caller_packed;                               // caller that created the entry
150 #endif
151 };
/* Per-thread table of held rw_locks (DEBUG_RW builds only). */
152 typedef struct rw_lock_debug {
153 	struct rw_lock_debug_entry rwld_locks[LCK_RW_EXPECTED_MAX_NUMBER]; /* rw_lock debug info of currently held locks */
154 	uint8_t                    rwld_locks_saved : 7,                   /* number of locks saved in rwld_locks */
155 	    rwld_overflow : 1;                                             /* lock_entry was full, so it might be inaccurate */
156 	uint32_t                   rwld_locks_acquired;                    /* number of locks acquired */
157 } rw_lock_debug_t;
158 
159 _Static_assert(LCK_RW_EXPECTED_MAX_NUMBER <= 127, "LCK_RW_EXPECTED_MAX_NUMBER bigger than rwld_locks_saved");
160 
161 #endif /* XNU_KERNEL_PRIVATE */
162 #endif /* DEVELOPMENT || DEBUG */
163 
164 typedef unsigned int     lck_rw_type_t;    /* lock type argument: LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE */
165 
166 #define LCK_RW_TYPE_SHARED              0x01
167 #define LCK_RW_TYPE_EXCLUSIVE           0x02
168 
169 #define decl_lck_rw_data(class, name)   class lck_rw_t name     /* 'class' is a storage class, e.g. static or extern */
170 
171 #if XNU_KERNEL_PRIVATE
172 /*
173  * Auto-initializing rw-locks declarations
174  * ------------------------------------
175  *
176  * Unless you need to configure your locks in very specific ways,
177  * there is no point creating explicit lock attributes. For most
178  * static locks, this declaration macro can be used:
179  *
180  * - LCK_RW_DECLARE.
181  *
182  * For cases when some particular attributes need to be used,
183  * LCK_RW_DECLARE_ATTR takes a variable declared with
184  * LCK_ATTR_DECLARE as an argument.
185  */
186 
/* Registration record consumed by lck_rw_startup_init() during early boot. */
187 struct lck_rw_startup_spec {
188 	lck_rw_t                *lck;           /* lock to initialize */
189 	lck_grp_t               *lck_grp;       /* group the lock is accounted to */
190 	lck_attr_t              *lck_attr;      /* attributes, or LCK_ATTR_NULL */
191 };
192 
193 extern void             lck_rw_startup_init(
194 	struct lck_rw_startup_spec *spec);
195 
196 #define LCK_RW_DECLARE_ATTR(var, grp, attr) \
197 	lck_rw_t var; \
198 	static __startup_data struct lck_rw_startup_spec \
199 	__startup_lck_rw_spec_ ## var = { &var, grp, attr }; \
200 	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_rw_startup_init, \
201 	    &__startup_lck_rw_spec_ ## var)
202 
203 #define LCK_RW_DECLARE(var, grp) \
204 	LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
205 
/* 'type' values for lck_rw_assert() / LCK_RW_ASSERT(). */
206 #define LCK_RW_ASSERT_SHARED    0x01
207 #define LCK_RW_ASSERT_EXCLUSIVE 0x02
208 #define LCK_RW_ASSERT_HELD      0x03
209 #define LCK_RW_ASSERT_NOTHELD   0x04
210 #endif /* XNU_KERNEL_PRIVATE */
211 
/* Assertion wrappers: expand to nothing when the corresponding build flag is off. */
212 #if MACH_ASSERT
213 #define LCK_RW_ASSERT(lck, type) lck_rw_assert((lck),(type))
214 #else /* MACH_ASSERT */
215 #define LCK_RW_ASSERT(lck, type)        /* compiled out */
216 #endif /* MACH_ASSERT */
217 
218 #if DEBUG
219 #define LCK_RW_ASSERT_DEBUG(lck, type) lck_rw_assert((lck),(type))
220 #else /* DEBUG */
221 #define LCK_RW_ASSERT_DEBUG(lck, type)  /* compiled out */
222 #endif /* DEBUG */
223 
224 /*!
225  * @function lck_rw_alloc_init
226  *
227  * @abstract
228  * Allocates and initializes a rw_lock_t.
229  *
230  * @discussion
231  * The function can block. See lck_rw_init() for initialization details.
232  *
233  * @param grp           lock group to associate with the lock.
234  * @param attr          lock attribute to initialize the lock.
235  *
236  * @returns             NULL or the allocated lock
237  */
238 extern lck_rw_t         *lck_rw_alloc_init(
239 	lck_grp_t               *grp,
240 	lck_attr_t              *attr);
241 
242 /*!
243  * @function lck_rw_init
244  *
245  * @abstract
246  * Initializes a rw_lock_t.
247  *
248  * @discussion
249  * Usage statistics for the lock are going to be added to the lock group provided.
250  *
251  * The lock attribute can be LCK_ATTR_NULL or an attribute can be allocated with
252  * lck_attr_alloc_init. So far however none of the attribute settings are supported.
253  *
254  * @param lck           lock to initialize.
255  * @param grp           lock group to associate with the lock.
256  * @param attr          lock attribute to initialize the lock.
257  */
258 extern void             lck_rw_init(
259 	lck_rw_t                *lck,
260 	lck_grp_t               *grp,
261 	lck_attr_t              *attr);
262 
263 /*!
264  * @function lck_rw_free
265  *
266  * @abstract
267  * Frees a rw_lock previously allocated with lck_rw_alloc_init().
268  *
269  * @discussion
270  * The lock must be not held by any thread.
271  *
272  * @param lck           rw_lock to free.
273  */
274 extern void             lck_rw_free(
275 	lck_rw_t                *lck,
276 	lck_grp_t               *grp);
277 
278 /*!
279  * @function lck_rw_destroy
280  *
281  * @abstract
282  * Destroys a rw_lock previously initialized with lck_rw_init().
283  *
284  * @discussion
285  * The lock must be not held by any thread.
286  *
287  * @param lck           rw_lock to destroy.
288  */
289 extern void             lck_rw_destroy(
290 	lck_rw_t                *lck,
291 	lck_grp_t               *grp);
292 
293 /*!
294  * @function lck_rw_lock
295  *
296  * @abstract
297  * Locks a rw_lock with the specified type.
298  *
299  * @discussion
300  * See lck_rw_lock_shared() or lck_rw_lock_exclusive() for more details.
301  *
302  * @param lck           rw_lock to lock.
303  * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
304  */
305 extern void             lck_rw_lock(
306 	lck_rw_t                *lck,
307 	lck_rw_type_t           lck_rw_type);
308 
309 /*!
310  * @function lck_rw_try_lock
311  *
312  * @abstract
313  * Tries to lock a rw_lock with the specified type.
314  *
315  * @discussion
316  * This function will return and not wait/block in case the lock is already held.
317  * See lck_rw_try_lock_shared() or lck_rw_try_lock_exclusive() for more details.
318  *
319  * @param lck           rw_lock to lock.
320  * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
321  *
322  * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
323  */
324 extern boolean_t        lck_rw_try_lock(
325 	lck_rw_t                *lck,
326 	lck_rw_type_t           lck_rw_type);
327 
328 /*!
329  * @function lck_rw_unlock
330  *
331  * @abstract
332  * Unlocks a rw_lock previously locked with lck_rw_type.
333  *
334  * @discussion
335  * The lock must be unlocked by the same thread it was locked from.
336  * The type of the lock/unlock have to match, unless an upgrade/downgrade was performed while
337  * holding the lock.
338  *
339  * @param lck           rw_lock to unlock.
340  * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
341  */
342 extern void             lck_rw_unlock(
343 	lck_rw_t                *lck,
344 	lck_rw_type_t           lck_rw_type);
345 
346 /*!
347  * @function lck_rw_lock_shared
348  *
349  * @abstract
350  * Locks a rw_lock in shared mode.
351  *
352  * @discussion
353  * This function can block.
354  * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time
355  * can acquire it in exclusive mode.
356  * If the lock is held in shared mode and there are no writers waiting, a reader will be able to acquire
357  * the lock without waiting.
358  * If the lock is held in shared mode and there is at least a writer waiting, a reader will wait
359  * for all the writers to make progress.
360  * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported.
361  *
362  * @param lck           rw_lock to lock.
363  */
364 extern void             lck_rw_lock_shared(
365 	lck_rw_t                *lck);
366 
367 /*!
368  * @function lck_rw_lock_shared_to_exclusive
369  *
370  * @abstract
371  * Upgrades a rw_lock held in shared mode to exclusive.
372  *
373  * @discussion
374  * This function can block.
375  * Only one reader at a time can upgrade to exclusive mode. If the upgrade fails the function will
376  * return with the lock not held.
377  * The caller needs to hold the lock in shared mode to upgrade it.
378  *
379  * @param lck           rw_lock already held in shared mode to upgrade.
380  *
381  * @returns TRUE if the lock was upgraded, FALSE if it was not possible.
382  *          If the function was not able to upgrade the lock, the lock will be dropped
383  *          by the function.
384  */
385 extern boolean_t        lck_rw_lock_shared_to_exclusive(
386 	lck_rw_t                *lck);
387 
388 /*!
389  * @function lck_rw_unlock_shared
390  *
391  * @abstract
392  * Unlocks a rw_lock previously locked in shared mode.
393  *
394  * @discussion
395  * The same thread that locked the lock needs to unlock it.
396  *
397  * @param lck           rw_lock held in shared mode to unlock.
398  */
399 extern void             lck_rw_unlock_shared(
400 	lck_rw_t                *lck);
401 
402 /*!
403  * @function lck_rw_lock_exclusive
404  *
405  * @abstract
406  * Locks a rw_lock in exclusive mode.
407  *
408  * @discussion
409  * This function can block.
410  * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time
411  * can acquire it in exclusive mode.
412  * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported.
413  *
414  * @param lck           rw_lock to lock.
415  */
416 extern void             lck_rw_lock_exclusive(
417 	lck_rw_t                *lck);
418 
419 /*!
420  * @function lck_rw_lock_exclusive_to_shared
421  *
422  * @abstract
423  * Downgrades a rw_lock held in exclusive mode to shared.
424  *
425  * @discussion
426  * The caller needs to hold the lock in exclusive mode to be able to downgrade it.
427  *
428  * @param lck           rw_lock already held in exclusive mode to downgrade.
429  */
430 extern void             lck_rw_lock_exclusive_to_shared(
431 	lck_rw_t                *lck);
432 
433 /*!
434  * @function lck_rw_unlock_exclusive
435  *
436  * @abstract
437  * Unlocks a rw_lock previously locked in exclusive mode.
438  *
439  * @discussion
440  * The same thread that locked the lock needs to unlock it.
441  *
442  * @param lck           rw_lock held in exclusive mode to unlock.
443  */
444 extern void             lck_rw_unlock_exclusive(
445 	lck_rw_t                *lck);
446 
447 /*!
448  * @function lck_rw_sleep
449  *
450  * @abstract
451  * Assert_wait on an event while holding the rw_lock.
452  *
453  * @discussion
454  * the flags can decide how to re-acquire the lock upon wake up
455  * (LCK_SLEEP_SHARED, or LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
456  * and if the priority needs to be kept boosted until the lock is
457  * re-acquired (LCK_SLEEP_PROMOTED_PRI).
458  *
459  * @param lck                   rw_lock to use to synch the assert_wait.
460  * @param lck_sleep_action      flags.
461  * @param event                 event to assert_wait on.
462  * @param interruptible         wait type.
463  */
464 extern wait_result_t    lck_rw_sleep(
465 	lck_rw_t                *lck,
466 	lck_sleep_action_t      lck_sleep_action,
467 	event_t                 event,
468 	wait_interrupt_t        interruptible);
469 
470 /*!
471  * @function lck_rw_sleep_deadline
472  *
473  * @abstract
474  * Assert_wait_deadline on an event while holding the rw_lock.
475  *
476  * @discussion
477  * the flags can decide how to re-acquire the lock upon wake up
478  * (LCK_SLEEP_SHARED, or LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
479  * and if the priority needs to be kept boosted until the lock is
480  * re-acquired (LCK_SLEEP_PROMOTED_PRI).
481  *
482  * @param lck                   rw_lock to use to synch the assert_wait.
483  * @param lck_sleep_action      flags.
484  * @param event                 event to assert_wait on.
485  * @param interruptible         wait type.
486  * @param deadline              maximum time after which the thread is woken up
487  */
488 extern wait_result_t    lck_rw_sleep_deadline(
489 	lck_rw_t                *lck,
490 	lck_sleep_action_t      lck_sleep_action,
491 	event_t                 event,
492 	wait_interrupt_t        interruptible,
493 	uint64_t                deadline);
494 
495 #ifdef  XNU_KERNEL_PRIVATE
496 /*!
497  * @function lck_rw_assert
498  *
499  * @abstract
500  * Asserts the rw_lock is held.
501  *
502  * @discussion
503  * read-write locks do not have a concept of ownership when held in shared mode,
504  * so this function merely asserts that someone is holding the lock, not necessarily the caller.
505  * However if rw_lock_debug is on, a best effort mechanism to track the owners is in place, and
506  * this function can be more accurate.
507  * Type can be LCK_RW_ASSERT_SHARED, LCK_RW_ASSERT_EXCLUSIVE, LCK_RW_ASSERT_HELD
508  * LCK_RW_ASSERT_NOTHELD.
509  *
510  * @param lck   rw_lock to check.
511  * @param type  assert type
512  */
513 extern void             lck_rw_assert(
514 	lck_rw_t                *lck,
515 	unsigned int            type);
516 
517 /*!
518  * @function kdp_lck_rw_lock_is_acquired_exclusive
519  *
520  * @abstract
521  * Checks if a rw_lock is held exclusively.
522  *
523  * @discussion
524  * NOT SAFE: To be used only by kernel debugger to avoid deadlock.
525  *
526  * @param lck   lock to check
527  *
528  * @returns TRUE if the lock is held exclusively
529  */
530 extern boolean_t        kdp_lck_rw_lock_is_acquired_exclusive(
531 	lck_rw_t                *lck);
532 
533 /*!
534  * @function lck_rw_lock_exclusive_check_contended
535  *
536  * @abstract
537  * Locks a rw_lock in exclusive mode.
538  *
539  * @discussion
540  * This routine IS EXPERIMENTAL.
541  * It's only used for the vm object lock, and use for other subsystems is UNSUPPORTED.
542  * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's contention.
543  *
544  * @param lck           rw_lock to lock.
545  *
546  * @returns Returns TRUE if the thread spun or blocked while attempting to acquire the lock, FALSE
547  *          otherwise.
548  */
549 extern bool             lck_rw_lock_exclusive_check_contended(
550 	lck_rw_t                *lck);
551 
552 /*!
553  * @function lck_rw_lock_yield_shared
554  *
555  * @abstract
556  * Yields a rw_lock held in shared mode.
557  *
558  * @discussion
559  * This function can block.
560  * Yields the lock in case there are writers waiting.
561  * The yield will unlock, block, and re-lock the lock in shared mode.
562  *
563  * @param lck           rw_lock already held in shared mode to yield.
564  * @param force_yield   if set to true it will always yield irrespective of the lock status
565  *
566  * @returns TRUE if the lock was yielded, FALSE otherwise
567  */
568 extern boolean_t        lck_rw_lock_yield_shared(
569 	lck_rw_t                *lck,
570 	boolean_t               force_yield);
571 #endif /* XNU_KERNEL_PRIVATE */
572 
573 #if MACH_KERNEL_PRIVATE
574 
575 /*!
576  * @function lck_rw_clear_promotion
577  *
578  * @abstract
579  * Undo priority promotions when the last rw_lock
580  * is released by a thread (if a promotion was active).
581  *
582  * @param thread        thread to demote.
583  * @param trace_obj     object reason for the demotion.
584  */
585 extern void             lck_rw_clear_promotion(
586 	thread_t                thread,
587 	uintptr_t               trace_obj);
588 
589 /*!
590  * @function lck_rw_set_promotion_locked
591  *
592  * @abstract
593  * Callout from context switch if the thread goes
594  * off core with a positive rwlock_count.
595  *
596  * @discussion
597  * Called at splsched with the thread locked.
598  *
599  * @param thread        thread to promote.
600  */
601 extern void             lck_rw_set_promotion_locked(
602 	thread_t                thread);
603 
604 #endif /* MACH_KERNEL_PRIVATE */
605 
606 #ifdef  KERNEL_PRIVATE
607 /*!
608  * @function lck_rw_try_lock_shared
609  *
610  * @abstract
611  * Tries to lock a rw_lock in read (shared) mode.
612  *
613  * @discussion
614  * This function will return and not block in case the lock is already held.
615  * See lck_rw_lock_shared for more details.
616  *
617  * @param lck           rw_lock to lock.
618  *
619  * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
620  */
621 extern boolean_t        lck_rw_try_lock_shared(
622 	lck_rw_t                *lck);
623 
624 /*!
625  * @function lck_rw_try_lock_exclusive
626  *
627  * @abstract
628  * Tries to lock a rw_lock in write (exclusive) mode.
629  *
630  * @discussion
631  * This function will return and not block in case the lock is already held.
632  * See lck_rw_lock_exclusive for more details.
633  *
634  * @param lck           rw_lock to lock.
635  *
636  * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
637  */
638 extern boolean_t        lck_rw_try_lock_exclusive(
639 	lck_rw_t                *lck);
640 
641 /*!
642  * @function lck_rw_done
643  *
644  * @abstract
645  * Force unlocks a rw_lock without consistency checks.
646  *
647  * @discussion
648  * Do not use unless sure you can avoid consistency checks.
649  *
650  * @param lck           rw_lock to unlock.
651  */
652 extern lck_rw_type_t    lck_rw_done(
653 	lck_rw_t                *lck);
654 #endif /* KERNEL_PRIVATE */
655 
656 __END_DECLS
657 
658 #endif /* _KERN_RW_LOCK_H_ */
659