xref: /xnu-8019.80.24/osfmk/kern/lock_rw.h (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KERN_RW_LOCK_H_
30 #define _KERN_RW_LOCK_H_
31 
32 #include <kern/lock_types.h>
33 #include <kern/lock_group.h>
34 #include <kern/lock_attr.h>
35 
36 #ifdef  XNU_KERNEL_PRIVATE
37 #include <kern/startup.h>
38 #endif /* XNU_KERNEL_PRIVATE */
39 
40 __BEGIN_DECLS
41 
42 #ifdef  MACH_KERNEL_PRIVATE
43 extern uint32_t LcksOpts;
44 
/*
 * Internal representation of the read/write lock word.
 *
 * The two anonymous structs alias the same storage: the first exposes
 * the shared reader count and the individual state bits, the second
 * exposes the same 32 bits as a single word ("data") suitable for
 * whole-word atomic operations.  When tag_valid is set, "data" holds a
 * tag value (e.g. LCK_RW_TAG_DESTROYED) instead of the bitfield
 * encoding.
 */
typedef union {
	struct {
		uint16_t        shared_count;       /* No. of shared granted request */
		uint16_t
		    interlock:              1,      /* Interlock */
		    priv_excl:              1,      /* priority for Writer */
		    want_upgrade:           1,      /* Read-to-write upgrade waiting */
		    want_excl:              1,      /* Writer is waiting, or locked for write */
		    r_waiting:              1,      /* Someone is sleeping on lock */
		    w_waiting:              1,      /* Writer is sleeping on lock */
		    can_sleep:              1,      /* Can attempts to lock go to sleep? */
		    _pad2:                  8,      /* padding */
		    tag_valid:              1;      /* Field is actually a tag, not a bitfield */
#if __arm64__
		uint32_t        _pad4;              /* pad the union to 64 bits on arm64 */
#endif
	};
	struct {
		uint32_t        data;               /* Single word version of bitfields and shared count */
#if __arm64__
		uint32_t        lck_rw_pad4;
#endif
	};
} lck_rw_word_t;
69 
/*
 * Read/write lock: the lock word plus an owner field.
 * NOTE(review): lck_rw_owner is presumably only meaningful while the
 * lock is held in exclusive mode (shared mode has no single owner) —
 * confirm against the lock implementation.
 */
typedef struct {
	lck_rw_word_t   word;
	thread_t        lck_rw_owner __kernel_data_semantics;
} lck_rw_t;     /* arm: 8  arm64: 16 x86: 16 */
74 
/*
 * Convenience accessors for the fields of the lock word embedded in
 * lck_rw_t (see lck_rw_word_t above).
 */
#define lck_rw_shared_count     word.shared_count
#define lck_rw_interlock        word.interlock
#define lck_rw_priv_excl        word.priv_excl
#define lck_rw_want_upgrade     word.want_upgrade
#define lck_rw_want_excl        word.want_excl
#define lck_r_waiting           word.r_waiting
#define lck_w_waiting           word.w_waiting
#define lck_rw_can_sleep        word.can_sleep
#define lck_rw_data             word.data
// tag and data reference the same memory. When the tag_valid bit is set,
// the data word should be treated as a tag instead of a bitfield.
#define lck_rw_tag_valid        word.tag_valid
#define lck_rw_tag              word.data

/*
 * Bit positions inside lck_rw_data, mirroring the bitfield layout of
 * lck_rw_word_t: bits 0-15 hold the shared reader count, bits 16-22 the
 * individual state bits, bits 23-30 are padding, and bit 31 flags the
 * word as holding a tag value.
 */
#define LCK_RW_SHARED_READER_OFFSET      0
#define LCK_RW_INTERLOCK_BIT            16
#define LCK_RW_PRIV_EXCL_BIT            17
#define LCK_RW_WANT_UPGRADE_BIT         18
#define LCK_RW_WANT_EXCL_BIT            19
#define LCK_RW_R_WAITING_BIT            20
#define LCK_RW_W_WAITING_BIT            21
#define LCK_RW_CAN_SLEEP_BIT            22
//                                      23-30
#define LCK_RW_TAG_VALID_BIT            31

#define LCK_RW_INTERLOCK                (1U << LCK_RW_INTERLOCK_BIT)
#define LCK_RW_R_WAITING                (1U << LCK_RW_R_WAITING_BIT)
#define LCK_RW_W_WAITING                (1U << LCK_RW_W_WAITING_BIT)
#define LCK_RW_WANT_UPGRADE             (1U << LCK_RW_WANT_UPGRADE_BIT)
#define LCK_RW_WANT_EXCL                (1U << LCK_RW_WANT_EXCL_BIT)
#define LCK_RW_TAG_VALID                (1U << LCK_RW_TAG_VALID_BIT)
#define LCK_RW_PRIV_EXCL                (1U << LCK_RW_PRIV_EXCL_BIT)
#define LCK_RW_SHARED_MASK              (0xffff << LCK_RW_SHARED_READER_OFFSET)
#define LCK_RW_SHARED_READER            (0x1 << LCK_RW_SHARED_READER_OFFSET)

#define LCK_RW_TAG_DESTROYED            ((LCK_RW_TAG_VALID | 0xdddddeadu))      /* lock marked as Destroyed */
111 
112 #else /* MACH_KERNEL_PRIVATE */
113 
114 #ifdef  KERNEL_PRIVATE
115 // TODO does this need pragma pack(1)?
116 typedef struct {
117 	uintptr_t       opaque[2] __kernel_data_semantics;
118 } lck_rw_t;
119 #else /* KERNEL_PRIVATE */
120 typedef struct __lck_rw_t__     lck_rw_t;
121 #endif /* KERNEL_PRIVATE */
122 #endif /* MACH_KERNEL_PRIVATE */
123 
124 #if DEVELOPMENT || DEBUG
125 #ifdef XNU_KERNEL_PRIVATE
126 
127 #define DEBUG_RW                        1
128 #define LCK_RW_EXPECTED_MAX_NUMBER      3       /* Expected number per thread of concurrently held rw_lock */
129 
130 #if __LP64__
131 #define LCK_RW_CALLER_PACKED_BITS   48
132 #define LCK_RW_CALLER_PACKED_SHIFT   0
133 #define LCK_RW_CALLER_PACKED_BASE    0
134 #else
135 #define LCK_RW_CALLER_PACKED_BITS   32
136 #define LCK_RW_CALLER_PACKED_SHIFT   0
137 #define LCK_RW_CALLER_PACKED_BASE    0
138 #endif
139 
140 _Static_assert(!VM_PACKING_IS_BASE_RELATIVE(LCK_RW_CALLER_PACKED),
141     "Make sure the rwlde_caller_packed pointer packing is based on arithmetic shifts");
142 
143 
/*
 * One record of a rw_lock currently held by a thread (DEBUG_RW builds
 * only).  Packed to keep the per-thread array of entries small; on LP64
 * the caller pointer is stored packed into LCK_RW_CALLER_PACKED_BITS.
 */
struct __attribute__ ((packed)) rw_lock_debug_entry {
	lck_rw_t      *rwlde_lock;                                       // rw_lock held
	int8_t        rwlde_mode_count;                                  // -1 is held in write mode, positive value is the recursive read count
#if __LP64__
	uintptr_t     rwlde_caller_packed: LCK_RW_CALLER_PACKED_BITS;    // caller that created the entry
#else
	uintptr_t     rwlde_caller_packed;                               // caller that created the entry
#endif
};
/*
 * Per-thread bookkeeping of currently held rw_locks (DEBUG_RW builds
 * only).  Tracking is best-effort: once more locks are held than the
 * rwld_locks array can record, rwld_overflow is set and the saved
 * entries may be inaccurate.
 */
typedef struct rw_lock_debug {
	struct rw_lock_debug_entry rwld_locks[LCK_RW_EXPECTED_MAX_NUMBER]; /* rw_lock debug info of currently held locks */
	uint8_t                    rwld_locks_saved : 7,                   /* number of locks saved in rwld_locks */
	    rwld_overflow : 1;                                             /* lock_entry was full, so it might be inaccurate */
	uint32_t                   rwld_locks_acquired;                    /* number of locks acquired */
} rw_lock_debug_t;
159 
160 _Static_assert(LCK_RW_EXPECTED_MAX_NUMBER <= 127, "LCK_RW_EXPECTED_MAX_NUMBER bigger than rwld_locks_saved");
161 
162 #endif /* XNU_KERNEL_PRIVATE */
163 #endif /* DEVELOPMENT || DEBUG */
164 
165 typedef unsigned int     lck_rw_type_t;
166 
167 #define LCK_RW_TYPE_SHARED              0x01
168 #define LCK_RW_TYPE_EXCLUSIVE           0x02
169 
170 #define decl_lck_rw_data(class, name)   class lck_rw_t name
171 
172 #if XNU_KERNEL_PRIVATE
173 /*
174  * Auto-initializing rw-locks declarations
175  * ------------------------------------
176  *
177  * Unless you need to configure your locks in very specific ways,
178  * there is no point creating explicit lock attributes. For most
179  * static locks, this declaration macro can be used:
180  *
181  * - LCK_RW_DECLARE.
182  *
183  * For cases when some particular attributes need to be used,
184  * LCK_RW_DECLARE_ATTR takes a variable declared with
185  * LCK_ATTR_DECLARE as an argument.
186  */
187 
/*
 * Boot-time rw-lock initialization descriptor.  LCK_RW_DECLARE_ATTR
 * emits one of these per statically declared lock; lck_rw_startup_init()
 * consumes it during the LOCKS_EARLY startup phase.
 */
struct lck_rw_startup_spec {
	lck_rw_t                *lck;      /* lock to initialize */
	lck_grp_t               *lck_grp;  /* group to attribute the lock to */
	lck_attr_t              *lck_attr; /* attributes (or LCK_ATTR_NULL) */
};

extern void             lck_rw_startup_init(
	struct lck_rw_startup_spec *spec);
196 
197 #define LCK_RW_DECLARE_ATTR(var, grp, attr) \
198 	lck_rw_t var; \
199 	static __startup_data struct lck_rw_startup_spec \
200 	__startup_lck_rw_spec_ ## var = { &var, grp, attr }; \
201 	STARTUP_ARG(LOCKS_EARLY, STARTUP_RANK_FOURTH, lck_rw_startup_init, \
202 	    &__startup_lck_rw_spec_ ## var)
203 
204 #define LCK_RW_DECLARE(var, grp) \
205 	LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL)
206 
207 #define LCK_RW_ASSERT_SHARED    0x01
208 #define LCK_RW_ASSERT_EXCLUSIVE 0x02
209 #define LCK_RW_ASSERT_HELD      0x03
210 #define LCK_RW_ASSERT_NOTHELD   0x04
211 #endif /* XNU_KERNEL_PRIVATE */
212 
213 #if MACH_ASSERT
214 #define LCK_RW_ASSERT(lck, type) lck_rw_assert((lck),(type))
215 #else /* MACH_ASSERT */
216 #define LCK_RW_ASSERT(lck, type)
217 #endif /* MACH_ASSERT */
218 
219 #if DEBUG
220 #define LCK_RW_ASSERT_DEBUG(lck, type) lck_rw_assert((lck),(type))
221 #else /* DEBUG */
222 #define LCK_RW_ASSERT_DEBUG(lck, type)
223 #endif /* DEBUG */
224 
225 /*!
226  * @function lck_rw_alloc_init
227  *
228  * @abstract
229  * Allocates and initializes a rw_lock_t.
230  *
231  * @discussion
232  * The function can block. See lck_rw_init() for initialization details.
233  *
234  * @param grp           lock group to associate with the lock.
235  * @param attr          lock attribute to initialize the lock.
236  *
237  * @returns             NULL or the allocated lock
238  */
239 extern lck_rw_t         *lck_rw_alloc_init(
240 	lck_grp_t               *grp,
241 	lck_attr_t              *attr);
242 
243 /*!
244  * @function lck_rw_init
245  *
246  * @abstract
247  * Initializes a rw_lock_t.
248  *
249  * @discussion
250  * Usage statistics for the lock are going to be added to the lock group provided.
251  *
252  * The lock attribute can be LCK_ATTR_NULL or an attribute can be allocated with
253  * lck_attr_alloc_init. So far however none of the attribute settings are supported.
254  *
255  * @param lck           lock to initialize.
256  * @param grp           lock group to associate with the lock.
257  * @param attr          lock attribute to initialize the lock.
258  */
259 extern void             lck_rw_init(
260 	lck_rw_t                *lck,
261 	lck_grp_t               *grp,
262 	lck_attr_t              *attr);
263 
/*!
 * @function lck_rw_free
 *
 * @abstract
 * Frees a rw_lock previously allocated with lck_rw_alloc_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to free.
 * @param grp           lock group the lock was allocated with.
 */
extern void             lck_rw_free(
	lck_rw_t                *lck,
	lck_grp_t               *grp);
278 
/*!
 * @function lck_rw_destroy
 *
 * @abstract
 * Destroys a rw_lock previously initialized with lck_rw_init().
 *
 * @discussion
 * The lock must not be held by any thread.
 *
 * @param lck           rw_lock to destroy.
 * @param grp           lock group the lock was initialized with.
 */
extern void             lck_rw_destroy(
	lck_rw_t                *lck,
	lck_grp_t               *grp);
293 
294 /*!
295  * @function lck_rw_lock
296  *
297  * @abstract
298  * Locks a rw_lock with the specified type.
299  *
300  * @discussion
301  * See lck_rw_lock_shared() or lck_rw_lock_exclusive() for more details.
302  *
303  * @param lck           rw_lock to lock.
304  * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
305  */
306 extern void             lck_rw_lock(
307 	lck_rw_t                *lck,
308 	lck_rw_type_t           lck_rw_type);
309 
/*!
 * @function lck_rw_try_lock
 *
 * @abstract
 * Tries to lock a rw_lock with the specified type.
 *
 * @discussion
 * This function will return and not wait/block in case the lock is already held.
 * See lck_rw_try_lock_shared() or lck_rw_try_lock_exclusive() for more details.
 *
 * @param lck           rw_lock to lock.
 * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
 *
 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
 */
extern boolean_t        lck_rw_try_lock(
	lck_rw_t                *lck,
	lck_rw_type_t           lck_rw_type);
328 
329 /*!
330  * @function lck_rw_unlock
331  *
332  * @abstract
333  * Unlocks a rw_lock previously locked with lck_rw_type.
334  *
335  * @discussion
336  * The lock must be unlocked by the same thread it was locked from.
337  * The type of the lock/unlock have to match, unless an upgrade/downgrade was performed while
338  * holding the lock.
339  *
340  * @param lck           rw_lock to unlock.
341  * @param lck_rw_type   LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE
342  */
343 extern void             lck_rw_unlock(
344 	lck_rw_t                *lck,
345 	lck_rw_type_t           lck_rw_type);
346 
347 /*!
348  * @function lck_rw_lock_shared
349  *
350  * @abstract
351  * Locks a rw_lock in shared mode.
352  *
353  * @discussion
354  * This function can block.
355  * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time
356  * can acquire it in exclusive mode.
357  * If the lock is held in shared mode and there are no writers waiting, a reader will be able to acquire
358  * the lock without waiting.
359  * If the lock is held in shared mode and there is at least a writer waiting, a reader will wait
360  * for all the writers to make progress.
361  * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported.
362  *
363  * @param lck           rw_lock to lock.
364  */
365 extern void             lck_rw_lock_shared(
366 	lck_rw_t                *lck);
367 
/*!
 * @function lck_rw_lock_shared_to_exclusive
 *
 * @abstract
 * Upgrades a rw_lock held in shared mode to exclusive.
 *
 * @discussion
 * This function can block.
 * Only one reader at a time can upgrade to exclusive mode. If the upgrade fails the function will
 * return with the lock not held.
 * The caller needs to hold the lock in shared mode to upgrade it.
 *
 * @param lck           rw_lock already held in shared mode to upgrade.
 *
 * @returns TRUE if the lock was upgraded, FALSE if it was not possible.
 *          If the function was not able to upgrade the lock, the lock will be dropped
 *          by the function.
 */
extern boolean_t        lck_rw_lock_shared_to_exclusive(
	lck_rw_t                *lck);
388 
389 /*!
390  * @function lck_rw_unlock_shared
391  *
392  * @abstract
393  * Unlocks a rw_lock previously locked in shared mode.
394  *
395  * @discussion
396  * The same thread that locked the lock needs to unlock it.
397  *
398  * @param lck           rw_lock held in shared mode to unlock.
399  */
400 extern void             lck_rw_unlock_shared(
401 	lck_rw_t                *lck);
402 
403 /*!
404  * @function lck_rw_lock_exclusive
405  *
406  * @abstract
407  * Locks a rw_lock in exclusive mode.
408  *
409  * @discussion
410  * This function can block.
411  * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time
412  * can acquire it in exclusive mode.
413  * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported.
414  *
415  * @param lck           rw_lock to lock.
416  */
417 extern void             lck_rw_lock_exclusive(
418 	lck_rw_t                *lck);
419 
420 /*!
421  * @function lck_rw_lock_exclusive_to_shared
422  *
423  * @abstract
424  * Downgrades a rw_lock held in exclusive mode to shared.
425  *
426  * @discussion
427  * The caller needs to hold the lock in exclusive mode to be able to downgrade it.
428  *
429  * @param lck           rw_lock already held in exclusive mode to downgrade.
430  */
431 extern void             lck_rw_lock_exclusive_to_shared(
432 	lck_rw_t                *lck);
433 
434 /*!
435  * @function lck_rw_unlock_exclusive
436  *
437  * @abstract
438  * Unlocks a rw_lock previously locked in exclusive mode.
439  *
440  * @discussion
441  * The same thread that locked the lock needs to unlock it.
442  *
443  * @param lck           rw_lock held in exclusive mode to unlock.
444  */
445 extern void             lck_rw_unlock_exclusive(
446 	lck_rw_t                *lck);
447 
448 /*!
449  * @function lck_rw_sleep
450  *
451  * @abstract
452  * Assert_wait on an event while holding the rw_lock.
453  *
454  * @discussion
455  * the flags can decide how to re-acquire the lock upon wake up
456  * (LCK_SLEEP_SHARED, or LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
457  * and if the priority needs to be kept boosted until the lock is
458  * re-acquired (LCK_SLEEP_PROMOTED_PRI).
459  *
460  * @param lck                   rw_lock to use to synch the assert_wait.
461  * @param lck_sleep_action      flags.
462  * @param event                 event to assert_wait on.
463  * @param interruptible         wait type.
464  */
465 extern wait_result_t    lck_rw_sleep(
466 	lck_rw_t                *lck,
467 	lck_sleep_action_t      lck_sleep_action,
468 	event_t                 event,
469 	wait_interrupt_t        interruptible);
470 
471 /*!
472  * @function lck_rw_sleep_deadline
473  *
474  * @abstract
475  * Assert_wait_deadline on an event while holding the rw_lock.
476  *
477  * @discussion
478  * the flags can decide how to re-acquire the lock upon wake up
479  * (LCK_SLEEP_SHARED, or LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK)
480  * and if the priority needs to be kept boosted until the lock is
481  * re-acquired (LCK_SLEEP_PROMOTED_PRI).
482  *
483  * @param lck                   rw_lock to use to synch the assert_wait.
484  * @param lck_sleep_action      flags.
485  * @param event                 event to assert_wait on.
486  * @param interruptible         wait type.
487  * @param deadline              maximum time after which being woken up
488  */
489 extern wait_result_t    lck_rw_sleep_deadline(
490 	lck_rw_t                *lck,
491 	lck_sleep_action_t      lck_sleep_action,
492 	event_t                 event,
493 	wait_interrupt_t        interruptible,
494 	uint64_t                deadline);
495 
496 #ifdef  XNU_KERNEL_PRIVATE
497 /*!
498  * @function lck_rw_assert
499  *
500  * @abstract
501  * Asserts the rw_lock is held.
502  *
503  * @discussion
504  * read-write locks do not have a concept of ownership when held in shared mode,
505  * so this function merely asserts that someone is holding the lock, not necessarily the caller.
506  * However if rw_lock_debug is on, a best effort mechanism to track the owners is in place, and
507  * this function can be more accurate.
508  * Type can be LCK_RW_ASSERT_SHARED, LCK_RW_ASSERT_EXCLUSIVE, LCK_RW_ASSERT_HELD
509  * LCK_RW_ASSERT_NOTHELD.
510  *
511  * @param lck   rw_lock to check.
512  * @param type  assert type
513  */
514 extern void             lck_rw_assert(
515 	lck_rw_t                *lck,
516 	unsigned int            type);
517 
/*!
 * @function kdp_lck_rw_lock_is_acquired_exclusive
 *
 * @abstract
 * Checks if a rw_lock is held exclusively.
 *
 * @discussion
 * NOT SAFE: To be used only by kernel debugger to avoid deadlock.
 *
 * @param lck   lock to check
 *
 * @returns TRUE if the lock is held exclusively
 */
extern boolean_t        kdp_lck_rw_lock_is_acquired_exclusive(
	lck_rw_t                *lck);
533 
534 /*!
535  * @function lck_rw_lock_exclusive_check_contended
536  *
537  * @abstract
538  * Locks a rw_lock in exclusive mode.
539  *
540  * @discussion
541  * This routine IS EXPERIMENTAL.
542  * It's only used for the vm object lock, and use for other subsystems is UNSUPPORTED.
543  * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's contention.
544  *
545  * @param lck           rw_lock to lock.
546  *
547  * @returns Returns TRUE if the thread spun or blocked while attempting to acquire the lock, FALSE
548  *          otherwise.
549  */
550 extern bool             lck_rw_lock_exclusive_check_contended(
551 	lck_rw_t                *lck);
552 
/*!
 * @function lck_rw_lock_yield_shared
 *
 * @abstract
 * Yields a rw_lock held in shared mode.
 *
 * @discussion
 * This function can block.
 * Yields the lock in case there are writers waiting.
 * The yield will unlock, block, and re-lock the lock in shared mode.
 *
 * @param lck           rw_lock already held in shared mode to yield.
 * @param force_yield   if set to true it will always yield irrespective of the lock status
 *
 * @returns TRUE if the lock was yielded, FALSE otherwise
 */
extern boolean_t        lck_rw_lock_yield_shared(
	lck_rw_t                *lck,
	boolean_t               force_yield);
572 #endif /* XNU_KERNEL_PRIVATE */
573 
574 #if MACH_KERNEL_PRIVATE
575 #ifdef DEBUG_RW
576 /*!
577  * @function rw_lock_init
578  *
579  * @abstract
580  * Initialize the rw_lock subsystem
581  */
582 extern void rw_lock_init(void);
583 #endif /* DEBUG_RW */
584 
585 /*!
586  * @function lck_rw_clear_promotion
587  *
588  * @abstract
589  * Undo priority promotions when the last rw_lock
590  * is released by a thread (if a promotion was active).
591  *
592  * @param thread        thread to demote.
593  * @param trace_obj     object reason for the demotion.
594  */
595 extern void             lck_rw_clear_promotion(
596 	thread_t                thread,
597 	uintptr_t               trace_obj);
598 
599 /*!
600  * @function lck_rw_set_promotion_locked
601  *
602  * @abstract
603  * Callout from context switch if the thread goes
604  * off core with a positive rwlock_count.
605  *
606  * @discussion
607  * Called at splsched with the thread locked.
608  *
609  * @param thread        thread to promote.
610  */
611 extern void             lck_rw_set_promotion_locked(
612 	thread_t                thread);
613 
614 #endif /* MACH_KERNEL_PRIVATE */
615 
616 #ifdef  KERNEL_PRIVATE
/*!
 * @function lck_rw_try_lock_shared
 *
 * @abstract
 * Tries to lock a rw_lock in read mode.
 *
 * @discussion
 * This function will return and not block in case the lock is already held.
 * See lck_rw_lock_shared for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
 */
extern boolean_t        lck_rw_try_lock_shared(
	lck_rw_t                *lck);
633 
/*!
 * @function lck_rw_try_lock_exclusive
 *
 * @abstract
 * Tries to lock a rw_lock in write mode.
 *
 * @discussion
 * This function will return and not block in case the lock is already held.
 * See lck_rw_lock_exclusive for more details.
 *
 * @param lck           rw_lock to lock.
 *
 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held.
 */
extern boolean_t        lck_rw_try_lock_exclusive(
	lck_rw_t                *lck);
650 
651 /*!
652  * @function lck_rw_done
653  *
654  * @abstract
655  * Force unlocks a rw_lock without consistency checks.
656  *
657  * @discussion
658  * Do not use unless sure you can avoid consistency checks.
659  *
660  * @param lck           rw_lock to unlock.
661  */
662 extern lck_rw_type_t    lck_rw_done(
663 	lck_rw_t                *lck);
664 #endif /* KERNEL_PRIVATE */
665 
666 __END_DECLS
667 
668 #endif /* _KERN_RW_LOCK_H_ */
669