1 /* 2 * Copyright (c) 2021 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29 #ifndef _KERN_RW_LOCK_H_ 30 #define _KERN_RW_LOCK_H_ 31 32 #include <kern/lock_types.h> 33 #include <kern/lock_group.h> 34 #include <kern/lock_attr.h> 35 36 #ifdef XNU_KERNEL_PRIVATE 37 #include <kern/startup.h> 38 #endif /* XNU_KERNEL_PRIVATE */ 39 40 __BEGIN_DECLS 41 42 #ifdef MACH_KERNEL_PRIVATE 43 44 typedef union { 45 struct { 46 uint16_t shared_count; /* No. 
of shared granted request */ 47 uint16_t 48 interlock: 1, /* Interlock */ 49 priv_excl: 1, /* priority for Writer */ 50 want_upgrade: 1, /* Read-to-write upgrade waiting */ 51 want_excl: 1, /* Writer is waiting, or locked for write */ 52 r_waiting: 1, /* Someone is sleeping on lock */ 53 w_waiting: 1, /* Writer is sleeping on lock */ 54 can_sleep: 1, /* Can attempts to lock go to sleep? */ 55 _pad2: 8, /* padding */ 56 tag_valid: 1; /* Field is actually a tag, not a bitfield */ 57 }; 58 uint32_t data; /* Single word version of bitfields and shared count */ 59 } lck_rw_word_t; 60 61 typedef struct { 62 uint32_t lck_rw_unused : 24; /* tsid one day ... */ 63 uint32_t lck_rw_type : 8; /* LCK_TYPE_RW */ 64 uint32_t lck_rw_padding; 65 lck_rw_word_t lck_rw; 66 uint32_t lck_rw_owner; /* ctid_t */ 67 } lck_rw_t; /* arm: 8 arm64: 16 x86: 16 */ 68 69 #define lck_rw_shared_count lck_rw.shared_count 70 #define lck_rw_interlock lck_rw.interlock 71 #define lck_rw_priv_excl lck_rw.priv_excl 72 #define lck_rw_want_upgrade lck_rw.want_upgrade 73 #define lck_rw_want_excl lck_rw.want_excl 74 #define lck_r_waiting lck_rw.r_waiting 75 #define lck_w_waiting lck_rw.w_waiting 76 #define lck_rw_can_sleep lck_rw.can_sleep 77 #define lck_rw_data lck_rw.data 78 // tag and data reference the same memory. When the tag_valid bit is set, 79 // the data word should be treated as a tag instead of a bitfield. 
80 #define lck_rw_tag_valid lck_rw.tag_valid 81 #define lck_rw_tag lck_rw.data 82 83 #define LCK_RW_SHARED_READER_OFFSET 0 84 #define LCK_RW_INTERLOCK_BIT 16 85 #define LCK_RW_PRIV_EXCL_BIT 17 86 #define LCK_RW_WANT_UPGRADE_BIT 18 87 #define LCK_RW_WANT_EXCL_BIT 19 88 #define LCK_RW_R_WAITING_BIT 20 89 #define LCK_RW_W_WAITING_BIT 21 90 #define LCK_RW_CAN_SLEEP_BIT 22 91 // 23-30 92 #define LCK_RW_TAG_VALID_BIT 31 93 94 #define LCK_RW_INTERLOCK (1U << LCK_RW_INTERLOCK_BIT) 95 #define LCK_RW_R_WAITING (1U << LCK_RW_R_WAITING_BIT) 96 #define LCK_RW_W_WAITING (1U << LCK_RW_W_WAITING_BIT) 97 #define LCK_RW_WANT_UPGRADE (1U << LCK_RW_WANT_UPGRADE_BIT) 98 #define LCK_RW_WANT_EXCL (1U << LCK_RW_WANT_EXCL_BIT) 99 #define LCK_RW_TAG_VALID (1U << LCK_RW_TAG_VALID_BIT) 100 #define LCK_RW_PRIV_EXCL (1U << LCK_RW_PRIV_EXCL_BIT) 101 #define LCK_RW_SHARED_MASK (0xffff << LCK_RW_SHARED_READER_OFFSET) 102 #define LCK_RW_SHARED_READER (0x1 << LCK_RW_SHARED_READER_OFFSET) 103 104 #define LCK_RW_TAG_DESTROYED ((LCK_RW_TAG_VALID | 0xdddddeadu)) /* lock marked as Destroyed */ 105 106 #elif KERNEL_PRIVATE 107 typedef struct { 108 uintptr_t opaque[2] __kernel_data_semantics; 109 } lck_rw_t; 110 #else /* @KERNEL_PRIVATE */ 111 typedef struct __lck_rw_t__ lck_rw_t; 112 #endif /* !KERNEL_PRIVATE */ 113 114 #if DEVELOPMENT || DEBUG 115 #ifdef XNU_KERNEL_PRIVATE 116 117 #define DEBUG_RW 1 118 #define LCK_RW_EXPECTED_MAX_NUMBER 3 /* Expected number per thread of concurrently held rw_lock */ 119 120 #if __LP64__ 121 #define LCK_RW_CALLER_PACKED_BITS 48 122 #define LCK_RW_CALLER_PACKED_SHIFT 0 123 #define LCK_RW_CALLER_PACKED_BASE 0 124 #else 125 #define LCK_RW_CALLER_PACKED_BITS 32 126 #define LCK_RW_CALLER_PACKED_SHIFT 0 127 #define LCK_RW_CALLER_PACKED_BASE 0 128 #endif 129 130 _Static_assert(!VM_PACKING_IS_BASE_RELATIVE(LCK_RW_CALLER_PACKED), 131 "Make sure the rwlde_caller_packed pointer packing is based on arithmetic shifts"); 132 133 134 struct __attribute__ ((packed)) rw_lock_debug_entry 
{ 135 lck_rw_t *rwlde_lock; // rw_lock held 136 int8_t rwlde_mode_count; // -1 is held in write mode, positive value is the recursive read count 137 #if __LP64__ 138 uintptr_t rwlde_caller_packed: LCK_RW_CALLER_PACKED_BITS; // caller that created the entry 139 #else 140 uintptr_t rwlde_caller_packed; // caller that created the entry 141 #endif 142 }; 143 typedef struct rw_lock_debug { 144 struct rw_lock_debug_entry rwld_locks[LCK_RW_EXPECTED_MAX_NUMBER]; /* rw_lock debug info of currently held locks */ 145 uint8_t rwld_locks_saved : 7, /* number of locks saved in rwld_locks */ 146 rwld_overflow : 1; /* lock_entry was full, so it might be inaccurate */ 147 uint32_t rwld_locks_acquired; /* number of locks acquired */ 148 } rw_lock_debug_t; 149 150 _Static_assert(LCK_RW_EXPECTED_MAX_NUMBER <= 127, "LCK_RW_EXPECTED_MAX_NUMBER bigger than rwld_locks_saved"); 151 152 #endif /* XNU_KERNEL_PRIVATE */ 153 #endif /* DEVELOPMENT || DEBUG */ 154 155 typedef unsigned int lck_rw_type_t; 156 157 #define LCK_RW_TYPE_SHARED 0x01 158 #define LCK_RW_TYPE_EXCLUSIVE 0x02 159 160 #define decl_lck_rw_data(class, name) class lck_rw_t name 161 162 #if XNU_KERNEL_PRIVATE 163 /* 164 * Auto-initializing rw-locks declarations 165 * ------------------------------------ 166 * 167 * Unless you need to configure your locks in very specific ways, 168 * there is no point creating explicit lock attributes. For most 169 * static locks, this declaration macro can be used: 170 * 171 * - LCK_RW_DECLARE. 172 * 173 * For cases when some particular attributes need to be used, 174 * LCK_RW_DECLARE_ATTR takes a variable declared with 175 * LCK_ATTR_DECLARE as an argument. 
176 */ 177 178 struct lck_rw_startup_spec { 179 lck_rw_t *lck; 180 lck_grp_t *lck_grp; 181 lck_attr_t *lck_attr; 182 }; 183 184 extern void lck_rw_startup_init( 185 struct lck_rw_startup_spec *spec); 186 187 #define LCK_RW_DECLARE_ATTR(var, grp, attr) \ 188 lck_rw_t var; \ 189 static __startup_data struct lck_rw_startup_spec \ 190 __startup_lck_rw_spec_ ## var = { &var, grp, attr }; \ 191 STARTUP_ARG(LOCKS, STARTUP_RANK_FOURTH, lck_rw_startup_init, \ 192 &__startup_lck_rw_spec_ ## var) 193 194 #define LCK_RW_DECLARE(var, grp) \ 195 LCK_RW_DECLARE_ATTR(var, grp, LCK_ATTR_NULL) 196 197 #define LCK_RW_ASSERT_SHARED 0x01 198 #define LCK_RW_ASSERT_EXCLUSIVE 0x02 199 #define LCK_RW_ASSERT_HELD 0x03 200 #define LCK_RW_ASSERT_NOTHELD 0x04 201 #endif /* XNU_KERNEL_PRIVATE */ 202 203 #if MACH_ASSERT 204 #define LCK_RW_ASSERT(lck, type) lck_rw_assert((lck),(type)) 205 #else /* MACH_ASSERT */ 206 #define LCK_RW_ASSERT(lck, type) 207 #endif /* MACH_ASSERT */ 208 209 #if DEBUG 210 #define LCK_RW_ASSERT_DEBUG(lck, type) lck_rw_assert((lck),(type)) 211 #else /* DEBUG */ 212 #define LCK_RW_ASSERT_DEBUG(lck, type) 213 #endif /* DEBUG */ 214 215 /*! 216 * @function lck_rw_alloc_init 217 * 218 * @abstract 219 * Allocates and initializes a rw_lock_t. 220 * 221 * @discussion 222 * The function can block. See lck_rw_init() for initialization details. 223 * 224 * @param grp lock group to associate with the lock. 225 * @param attr lock attribute to initialize the lock. 226 * 227 * @returns NULL or the allocated lock 228 */ 229 extern lck_rw_t *lck_rw_alloc_init( 230 lck_grp_t *grp, 231 lck_attr_t *attr); 232 233 /*! 234 * @function lck_rw_init 235 * 236 * @abstract 237 * Initializes a rw_lock_t. 238 * 239 * @discussion 240 * Usage statistics for the lock are going to be added to the lock group provided. 241 * 242 * The lock attribute can be LCK_ATTR_NULL or an attribute can be allocated with 243 * lck_attr_alloc_init. So far however none of the attribute settings are supported. 
244 * 245 * @param lck lock to initialize. 246 * @param grp lock group to associate with the lock. 247 * @param attr lock attribute to initialize the lock. 248 */ 249 extern void lck_rw_init( 250 lck_rw_t *lck, 251 lck_grp_t *grp, 252 lck_attr_t *attr); 253 254 /*! 255 * @function lck_rw_free 256 * 257 * @abstract 258 * Frees a rw_lock previously allocated with lck_rw_alloc_init(). 259 * 260 * @discussion 261 * The lock must not be held by any thread. 262 * 263 * @param lck rw_lock to free. 264 */ 265 extern void lck_rw_free( 266 lck_rw_t *lck, 267 lck_grp_t *grp); 268 269 /*! 270 * @function lck_rw_destroy 271 * 272 * @abstract 273 * Destroys a rw_lock previously initialized with lck_rw_init(). 274 * 275 * @discussion 276 * The lock must not be held by any thread. 277 * 278 * @param lck rw_lock to destroy. 279 */ 280 extern void lck_rw_destroy( 281 lck_rw_t *lck, 282 lck_grp_t *grp); 283 284 /*! 285 * @function lck_rw_lock 286 * 287 * @abstract 288 * Locks a rw_lock with the specified type. 289 * 290 * @discussion 291 * See lck_rw_lock_shared() or lck_rw_lock_exclusive() for more details. 292 * 293 * @param lck rw_lock to lock. 294 * @param lck_rw_type LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE 295 */ 296 extern void lck_rw_lock( 297 lck_rw_t *lck, 298 lck_rw_type_t lck_rw_type); 299 300 /*! 301 * @function lck_rw_try_lock 302 * 303 * @abstract 304 * Tries to lock a rw_lock with the specified type. 305 * 306 * @discussion 307 * This function will return and not wait/block in case the lock is already held. 308 * See lck_rw_try_lock_shared() or lck_rw_try_lock_exclusive() for more details. 309 * 310 * @param lck rw_lock to lock. 311 * @param lck_rw_type LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE 312 * 313 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held. 314 */ 315 extern boolean_t lck_rw_try_lock( 316 lck_rw_t *lck, 317 lck_rw_type_t lck_rw_type); 318 319 /*!
320 * @function lck_rw_unlock 321 * 322 * @abstract 323 * Unlocks a rw_lock previously locked with lck_rw_type. 324 * 325 * @discussion 326 * The lock must be unlocked by the same thread it was locked from. 327 * The type of the lock/unlock have to match, unless an upgrade/downgrade was performed while 328 * holding the lock. 329 * 330 * @param lck rw_lock to unlock. 331 * @param lck_rw_type LCK_RW_TYPE_SHARED or LCK_RW_TYPE_EXCLUSIVE 332 */ 333 extern void lck_rw_unlock( 334 lck_rw_t *lck, 335 lck_rw_type_t lck_rw_type); 336 337 /*! 338 * @function lck_rw_lock_shared 339 * 340 * @abstract 341 * Locks a rw_lock in shared mode. 342 * 343 * @discussion 344 * This function can block. 345 * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time 346 * can acquire it in exclusive mode. 347 * If the lock is held in shared mode and there are no writers waiting, a reader will be able to acquire 348 * the lock without waiting. 349 * If the lock is held in shared mode and there is at least a writer waiting, a reader will wait 350 * for all the writers to make progress. 351 * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported. 352 * 353 * @param lck rw_lock to lock. 354 */ 355 extern void lck_rw_lock_shared( 356 lck_rw_t *lck); 357 358 359 #if MACH_KERNEL_PRIVATE 360 /*! 361 * @function lck_rw_lock_shared_b 362 * 363 * @abstract 364 * Locks a rw_lock in shared mode. Returns early if the lock can't be acquired 365 * and the specified block returns true. 366 * 367 * @discussion 368 * Identical to lck_rw_lock_shared() but can return early if the lock can't be 369 * acquired and the specified block returns true. The block is called 370 * repeatedly when waiting to acquire the lock. 371 * Should only be called when the lock cannot sleep (i.e. when 372 * lock->lck_rw_can_sleep is false). 373 * 374 * @param lock rw_lock to lock. 
375 * @param lock_pause block invoked while waiting to acquire lock 376 * 377 * @returns Returns TRUE if the lock is successfully taken, 378 * FALSE if the block returns true and the lock has 379 * not been acquired. 380 */ 381 extern boolean_t 382 lck_rw_lock_shared_b( 383 lck_rw_t * lock, 384 bool (^lock_pause)(void)); 385 386 /*! 387 * @function lck_rw_lock_exclusive_b 388 * 389 * @abstract 390 * Locks a rw_lock in exclusive mode. Returns early if the lock can't be acquired 391 * and the specified block returns true. 392 * 393 * @discussion 394 * Identical to lck_rw_lock_exclusive() but can return early if the lock can't be 395 * acquired and the specified block returns true. The block is called 396 * repeatedly when waiting to acquire the lock. 397 * Should only be called when the lock cannot sleep (i.e. when 398 * lock->lck_rw_can_sleep is false). 399 * 400 * @param lock rw_lock to lock. 401 * @param lock_pause block invoked while waiting to acquire lock 402 * 403 * @returns Returns TRUE if the lock is successfully taken, 404 * FALSE if the block returns true and the lock has 405 * not been acquired. 406 */ 407 extern boolean_t 408 lck_rw_lock_exclusive_b( 409 lck_rw_t * lock, 410 bool (^lock_pause)(void)); 411 #endif /* MACH_KERNEL_PRIVATE */ 412 413 /*! 414 * @function lck_rw_lock_shared_to_exclusive 415 * 416 * @abstract 417 * Upgrades a rw_lock held in shared mode to exclusive. 418 * 419 * @discussion 420 * This function can block. 421 * Only one reader at a time can upgrade to exclusive mode. If the upgrade fails the function will 422 * return with the lock not held. 423 * The caller needs to hold the lock in shared mode to upgrade it. 424 * 425 * @param lck rw_lock already held in shared mode to upgrade. 426 * 427 * @returns TRUE if the lock was upgraded, FALSE if it was not possible. 428 * If the function was not able to upgrade the lock, the lock will be dropped 429 * by the function.
430 */ 431 extern boolean_t lck_rw_lock_shared_to_exclusive( 432 lck_rw_t *lck); 433 434 /*! 435 * @function lck_rw_unlock_shared 436 * 437 * @abstract 438 * Unlocks a rw_lock previously locked in shared mode. 439 * 440 * @discussion 441 * The same thread that locked the lock needs to unlock it. 442 * 443 * @param lck rw_lock held in shared mode to unlock. 444 */ 445 extern void lck_rw_unlock_shared( 446 lck_rw_t *lck); 447 448 /*! 449 * @function lck_rw_lock_exclusive 450 * 451 * @abstract 452 * Locks a rw_lock in exclusive mode. 453 * 454 * @discussion 455 * This function can block. 456 * Multiple threads can acquire the lock in shared mode at the same time, but only one thread at a time 457 * can acquire it in exclusive mode. 458 * NOTE: the thread cannot return to userspace while the lock is held. Recursive locking is not supported. 459 * 460 * @param lck rw_lock to lock. 461 */ 462 extern void lck_rw_lock_exclusive( 463 lck_rw_t *lck); 464 465 /*! 466 * @function lck_rw_lock_exclusive_to_shared 467 * 468 * @abstract 469 * Downgrades a rw_lock held in exclusive mode to shared. 470 * 471 * @discussion 472 * The caller needs to hold the lock in exclusive mode to be able to downgrade it. 473 * 474 * @param lck rw_lock already held in exclusive mode to downgrade. 475 */ 476 extern void lck_rw_lock_exclusive_to_shared( 477 lck_rw_t *lck); 478 479 /*! 480 * @function lck_rw_unlock_exclusive 481 * 482 * @abstract 483 * Unlocks a rw_lock previously locked in exclusive mode. 484 * 485 * @discussion 486 * The same thread that locked the lock needs to unlock it. 487 * 488 * @param lck rw_lock held in exclusive mode to unlock. 489 */ 490 extern void lck_rw_unlock_exclusive( 491 lck_rw_t *lck); 492 493 /*! 494 * @function lck_rw_sleep 495 * 496 * @abstract 497 * Assert_wait on an event while holding the rw_lock. 
498 * 499 * @discussion 500 * the flags can decide how to re-acquire the lock upon wake up 501 * (LCK_SLEEP_SHARED, or LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK) 502 * and if the priority needs to be kept boosted until the lock is 503 * re-acquired (LCK_SLEEP_PROMOTED_PRI). 504 * 505 * @param lck rw_lock to use to synch the assert_wait. 506 * @param lck_sleep_action flags. 507 * @param event event to assert_wait on. 508 * @param interruptible wait type. 509 */ 510 extern wait_result_t lck_rw_sleep( 511 lck_rw_t *lck, 512 lck_sleep_action_t lck_sleep_action, 513 event_t event, 514 wait_interrupt_t interruptible); 515 516 /*! 517 * @function lck_rw_sleep_deadline 518 * 519 * @abstract 520 * Assert_wait_deadline on an event while holding the rw_lock. 521 * 522 * @discussion 523 * the flags can decide how to re-acquire the lock upon wake up 524 * (LCK_SLEEP_SHARED, or LCK_SLEEP_EXCLUSIVE, or LCK_SLEEP_UNLOCK) 525 * and if the priority needs to be kept boosted until the lock is 526 * re-acquired (LCK_SLEEP_PROMOTED_PRI). 527 * 528 * @param lck rw_lock to use to synch the assert_wait. 529 * @param lck_sleep_action flags. 530 * @param event event to assert_wait on. 531 * @param interruptible wait type. 532 * @param deadline maximum time after which being woken up 533 */ 534 extern wait_result_t lck_rw_sleep_deadline( 535 lck_rw_t *lck, 536 lck_sleep_action_t lck_sleep_action, 537 event_t event, 538 wait_interrupt_t interruptible, 539 uint64_t deadline); 540 541 #ifdef XNU_KERNEL_PRIVATE 542 /*! 543 * @function lck_rw_assert 544 * 545 * @abstract 546 * Asserts the rw_lock is held. 547 * 548 * @discussion 549 * read-write locks do not have a concept of ownership when held in shared mode, 550 * so this function merely asserts that someone is holding the lock, not necessarily the caller. 551 * However if rw_lock_debug is on, a best effort mechanism to track the owners is in place, and 552 * this function can be more accurate. 
553 * Type can be LCK_RW_ASSERT_SHARED, LCK_RW_ASSERT_EXCLUSIVE, LCK_RW_ASSERT_HELD 554 * or LCK_RW_ASSERT_NOTHELD. 555 * 556 * @param lck rw_lock to check. 557 * @param type assert type 558 */ 559 extern void lck_rw_assert( 560 lck_rw_t *lck, 561 unsigned int type); 562 563 /*! 564 * @function kdp_lck_rw_lock_is_acquired_exclusive 565 * 566 * @abstract 567 * Checks if a rw_lock is held exclusively. 568 * 569 * @discussion 570 * NOT SAFE: To be used only by kernel debugger to avoid deadlock. 571 * 572 * @param lck lock to check 573 * 574 * @returns TRUE if the lock is held exclusively 575 */ 576 extern boolean_t kdp_lck_rw_lock_is_acquired_exclusive( 577 lck_rw_t *lck); 578 579 /*! 580 * @function lck_rw_lock_exclusive_check_contended 581 * 582 * @abstract 583 * Locks a rw_lock in exclusive mode. 584 * 585 * @discussion 586 * This routine IS EXPERIMENTAL. 587 * It's only used for the vm object lock, and use for other subsystems is UNSUPPORTED. 588 * Note that the return value is ONLY A HEURISTIC w.r.t. the lock's contention. 589 * 590 * @param lck rw_lock to lock. 591 * 592 * @returns Returns TRUE if the thread spun or blocked while attempting to acquire the lock, FALSE 593 * otherwise. 594 */ 595 extern bool lck_rw_lock_exclusive_check_contended( 596 lck_rw_t *lck); 597 598 /*! 599 * @function lck_rw_lock_yield_shared 600 * 601 * @abstract 602 * Yields a rw_lock held in shared mode. 603 * 604 * @discussion 605 * This function can block. 606 * Yields the lock in case there are writers waiting. 607 * The yield will unlock, block, and re-lock the lock in shared mode. 608 * 609 * @param lck rw_lock already held in shared mode to yield.
610 * @param force_yield if set to true it will always yield irrespective of the lock status 611 * 612 * @returns TRUE if the lock was yielded, FALSE otherwise 613 */ 614 extern bool lck_rw_lock_yield_shared( 615 lck_rw_t *lck, 616 boolean_t force_yield); 617 618 __enum_decl(lck_rw_yield_t, uint32_t, { 619 LCK_RW_YIELD_WRITERS_ONLY, 620 LCK_RW_YIELD_ANY_WAITER, 621 LCK_RW_YIELD_ALWAYS, 622 }); 623 624 /*! 625 * @function lck_rw_lock_yield_exclusive 626 * 627 * @abstract 628 * Yields a rw_lock held in exclusive mode. 629 * 630 * @discussion 631 * This function can block. 632 * Yields the lock in case there are writers waiting. 633 * The yield will unlock, block, and re-lock the lock in exclusive mode. 634 * 635 * @param lck rw_lock already held in exclusive mode to yield. 636 * @param mode when to yield. 637 * 638 * @returns TRUE if the lock was yielded, FALSE otherwise 639 */ 640 extern bool lck_rw_lock_yield_exclusive( 641 lck_rw_t *lck, 642 lck_rw_yield_t mode); 643 644 #endif /* XNU_KERNEL_PRIVATE */ 645 646 #if MACH_KERNEL_PRIVATE 647 648 /*! 649 * @function lck_rw_clear_promotion 650 * 651 * @abstract 652 * Undo priority promotions when the last rw_lock 653 * is released by a thread (if a promotion was active). 654 * 655 * @param thread thread to demote. 656 * @param trace_obj object reason for the demotion. 657 */ 658 extern void lck_rw_clear_promotion( 659 thread_t thread, 660 uintptr_t trace_obj); 661 662 /*! 663 * @function lck_rw_set_promotion_locked 664 * 665 * @abstract 666 * Callout from context switch if the thread goes 667 * off core with a positive rwlock_count. 668 * 669 * @discussion 670 * Called at splsched with the thread locked. 671 * 672 * @param thread thread to promote. 673 */ 674 extern void lck_rw_set_promotion_locked( 675 thread_t thread); 676 677 #endif /* MACH_KERNEL_PRIVATE */ 678 679 #ifdef KERNEL_PRIVATE 680 /*! 681 * @function lck_rw_try_lock_shared 682 * 683 * @abstract 684 * Tries to lock a rw_lock in read mode.
685 * 686 * @discussion 687 * This function will return and not block in case the lock is already held. 688 * See lck_rw_lock_shared for more details. 689 * 690 * @param lck rw_lock to lock. 691 * 692 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held. 693 */ 694 extern boolean_t lck_rw_try_lock_shared( 695 lck_rw_t *lck); 696 697 /*! 698 * @function lck_rw_try_lock_exclusive 699 * 700 * @abstract 701 * Tries to lock a rw_lock in write mode. 702 * 703 * @discussion 704 * This function will return and not block in case the lock is already held. 705 * See lck_rw_lock_exclusive for more details. 706 * 707 * @param lck rw_lock to lock. 708 * 709 * @returns TRUE if the lock is successfully acquired, FALSE in case it was already held. 710 */ 711 extern boolean_t lck_rw_try_lock_exclusive( 712 lck_rw_t *lck); 713 714 /*! 715 * @function lck_rw_done 716 * 717 * @abstract 718 * Force unlocks a rw_lock without consistency checks. 719 * 720 * @discussion 721 * Do not use unless sure you can avoid consistency checks. 722 * 723 * @param lck rw_lock to unlock. 724 */ 725 extern lck_rw_type_t lck_rw_done( 726 lck_rw_t *lck); 727 #endif /* KERNEL_PRIVATE */ 728 729 __END_DECLS 730 731 #endif /* _KERN_RW_LOCK_H_ */ 732