/*
 * Copyright (c) 2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_LOCKS_INTERNAL_H_
#define _KERN_LOCKS_INTERNAL_H_

/*
 * NOTE(review): presumably opts the headers below into exposing their
 * lock-implementation-private declarations -- confirm against those headers.
 */
#define LOCK_PRIVATE 1
#include <sys/cdefs.h>
#include <stdint.h>
#include <kern/startup.h>
#include <kern/percpu.h>
#include <kern/lock_types.h>
#include <kern/lock_group.h>
#include <machine/locks.h>
#include <machine/cpu_number.h>
#include <os/atomic_private.h>

/*
 * This file shares implementation details for XNU lock implementations.
 * It is not meant to be shared with any other part of the code.
 */

__BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN

/* Everything in this header is internal: hide it from the exported symbol set. */
#pragma GCC visibility push(hidden)

/*!
 * @macro hw_spin_wait_until_n()
 *
 * @brief
 * Abstracts the platform specific way to spin around the value
 * of a memory location until a certain condition is met.
 *
 * @param count      how many times to spin without evaluating progress
 * @param ptr        the pointer to the memory location being observed
 * @param load_var   the variable to store the result of the load into
 * @param cond_expr  the stopping condition (can use @c load_var)
 *
 * @returns
 * - 0 if the loop stopped when the counter expired
 * - cond_expr's return value otherwise
 */
#define hw_spin_wait_until_n(count, ptr, load_var, cond_expr) ({ \
	typeof((cond_expr)) __cond_result; \
	\
	/* __cond_init exists so that (count) is evaluated exactly once */ \
	for (uint32_t __cond_init = (count), __cond_count = __cond_init; \
	    __probable(__cond_count-- > 0);) { \
		/* platform hook: performs the load and evaluates cond_expr */ \
		__hw_spin_wait_load(ptr, load_var, __cond_result, cond_expr); \
		if (__probable(__cond_result)) { \
			break; \
		} \
	} \
	\
	__cond_result; \
})

/*!
 * @macro hw_spin_wait_until()
 *
 * @brief
 * Convenience wrapper for hw_spin_wait_until_n() with the typical
 * LOCK_SNOOP_SPINS counter for progress evaluation.
 */
#define hw_spin_wait_until(ptr, load_var, cond_expr) \
	hw_spin_wait_until_n(LOCK_SNOOP_SPINS, ptr, load_var, cond_expr)


#if LOCK_PRETEST
/*
 * lck_pretestv(p, e, g): relaxed-load *p and compare it against e;
 * evaluates to true when they match.  On a mismatch the observed
 * value is stored through g, so the caller learns the current state
 * without paying for a failed atomic RMW.
 */
#define lck_pretestv(p, e, g) ({ \
	__auto_type __e = (e); \
	__auto_type __v = os_atomic_load(p, relaxed); \
	if (__v != __e) { \
		*(g) = __v; \
	} \
	__v == __e; \
})
/* lck_pretest(p, e): same relaxed-load pretest, without value capture. */
#define lck_pretest(p, e) \
	(os_atomic_load(p, relaxed) == (e))
#else
/* Pretest compiled out: always proceed straight to the atomic operation. */
#define lck_pretestv(p, e, g) 1
#define lck_pretest(p, e) 1
#endif

/*!
 * @function lock_cmpxchg
 *
 * @brief
 * Similar to os_atomic_cmpxchg() but with a pretest when LOCK_PRETEST is set.
 */
#define lock_cmpxchg(p, e, v, m) ({ \
	__auto_type _p = (p); \
	__auto_type _e = (e); \
	lck_pretest(_p, _e) && os_atomic_cmpxchg(_p, _e, v, m); \
})

/*!
 * @function lock_cmpxchgv
 *
 * @brief
 * Similar to os_atomic_cmpxchgv() but with a pretest when LOCK_PRETEST is set.
 */
#define lock_cmpxchgv(p, e, v, g, m) ({ \
	__auto_type _p = (p); \
	__auto_type _e = (e); \
	lck_pretestv(_p, _e, g) && os_atomic_cmpxchgv(_p, _e, v, g, m); \
})

/*
 * Exclusive-monitor based waiting.
 *
 * On LL/SC architectures, the load opens an exclusive monitor and
 * lock_wait_for_event() parks until that monitor is disturbed
 * (wait_for_event()).  Note that lock_store_exclusive() ignores @c ov
 * there: the store-exclusive succeeds based on the monitor, not on a
 * value comparison.
 *
 * Elsewhere this degrades to a plain relaxed load, cpu_pause(), and a
 * compare-exchange that does consult @c ov.
 */
#if OS_ATOMIC_HAS_LLSC
#define lock_load_exclusive(p, m)           os_atomic_load_exclusive(p, m)
#define lock_wait_for_event()               wait_for_event()
#define lock_store_exclusive(p, ov, nv, m)  os_atomic_store_exclusive(p, nv, m)
#else
#define lock_load_exclusive(p, m)           os_atomic_load(p, relaxed)
#define lock_wait_for_event()               cpu_pause()
#define lock_store_exclusive(p, ov, nv, m)  os_atomic_cmpxchg(p, ov, nv, m)
#endif


/*!
 * @enum lck_type_t
 *
 * @brief
 * A one-byte type tag used in byte 3 of locks to be able to identify them.
 */
__enum_decl(lck_type_t, uint8_t, {
	LCK_TYPE_NONE           = 0x00,
	LCK_TYPE_MUTEX          = 0x22,
	LCK_TYPE_RW             = 0x33,
	LCK_TYPE_TICKET         = 0x44,
	LCK_TYPE_GATE           = 0x55,
});

#pragma GCC visibility pop

__ASSUME_PTR_ABI_SINGLE_END __END_DECLS

#endif /* _KERN_LOCKS_INTERNAL_H_ */