/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _KERN_TICKET_LOCK_H_
#define _KERN_TICKET_LOCK_H_

#ifndef __ASSEMBLER__
#include <kern/assert.h>
#include <kern/lock_types.h>
#include <kern/lock_group.h>
#if XNU_KERNEL_PRIVATE
#include <kern/counter.h>
#endif /* XNU_KERNEL_PRIVATE */
#endif /* __ASSEMBLER__ */

#ifndef __ASSEMBLER__

__BEGIN_DECLS
#pragma GCC visibility push(hidden)

#ifdef MACH_KERNEL_PRIVATE

/*!
 * @typedef hw_lck_ticket_t
 *
 * @discussion
 * This type describes the low level type for a ticket lock.
 * @c lck_ticket_t provides a higher level abstraction
 * that also provides thread ownership information.
 *
 * This is a low level lock meant to be part of data structures
 * that are very constrained on space, or is part of a larger lock.
 *
 * This lower level interface supports an @c *_allow_invalid()
 * to implement advanced memory reclamation schemes using sequestering.
 *
 * @c hw_lck_ticket_invalidate() must be used on locks
 * that will be used this way: in addition to making subsequent calls to
 * @c hw_lck_ticket_lock_allow_invalid() fail, it allows for
 * @c hw_lck_ticket_destroy() to synchronize with callers to
 * @c hw_lck_ticket_lock_allow_invalid() who successfully reserved
 * a ticket but will fail, ensuring the memory can't be freed too early.
 *
 *
 * @c hw_lck_ticket_reserve() can be used to pre-reserve a ticket.
 * When this function returns @c true, then the lock was acquired.
 * When it returns @c false, then @c hw_lck_ticket_wait() must
 * be called to wait for this ticket.
 *
 * This can be used to resolve certain lock inversions: assuming
 * two locks, @c L (a mutex or any kind of lock) and @c T (a ticket lock),
 * where @c L can be taken when @c T is held but not the other way around,
 * then the following can be done to take both locks in "the wrong order",
 * with a guarantee of forward progress:
 *
 * <code>
 *     // starts with L held
 *     uint32_t ticket;
 *
 *     if (!hw_lck_ticket_reserve(T, &ticket)) {
 *         unlock(L);
 *         hw_lck_ticket_wait(T, ticket);
 *         lock(L);
 *         // possibly validate what might have changed
 *         // due to dropping L
 *     }
 *
 *     // both L and T are held
 * </code>
 *
 * This pattern above is safe even for a case when the protected
 * resource contains the ticket lock @c T, provided that it is
 * guaranteed that both @c T and @c L (in the proper order) will
 * be taken before that resource's death. In that case, in the resource
 * destructor, when @c hw_lck_ticket_destroy() is called, it will
 * wait for the reservation to be released.
 *
 * See @c waitq_pull_thread_locked() for an example of this where:
 * - @c L is the thread lock of a thread waiting on a given waitq,
 * - @c T is the lock for that waitq,
 * - the waitq can't be destroyed before the thread is unhooked from it,
 *   which happens under both @c L and @c T.
 *
 *
 * @note:
 * At the moment, this construct only supports up to 255 CPUs.
 * Supporting more CPUs requires losing the `lck_type` field,
 * and burning the low bit of the cticket/nticket
 * for the "invalidation" feature.
 */
typedef union hw_lck_ticket_s {
	struct {
		uint8_t  lck_type;              /* lock-type tag (bits 0-7 of lck_value) */
		uint8_t  lck_valid : 1;         /* cleared by hw_lck_ticket_invalidate() (bit 8) */
		uint8_t  lck_is_pv : 1;         /* paravirtualized ticket lock variant (see CONFIG_PV_TICKET below) */
		uint8_t  lck_unused : 6;
		union {
			struct {
				uint8_t  cticket; /* ticket currently being served */
				uint8_t  nticket; /* next ticket to hand out */
			};
			uint16_t tcurnext;        /* cticket/nticket accessed as one 16-bit word */
		};
	};
	uint32_t lck_value;                     /* whole lock word, for atomic whole-word operations */
} hw_lck_ticket_t;

/*!
 * @typedef lck_ticket_t
 *
 * @discussion
 * A higher level construct than hw_lck_ticket_t in 2 words
 * like other kernel locks, which admits thread ownership information.
 */
typedef struct lck_ticket_s {
	uint32_t        __lck_ticket_unused : 24;
	uint32_t        lck_ticket_type : 8;
	uint32_t        lck_ticket_padding;
	hw_lck_ticket_t tu;                     /* the underlying low-level ticket lock */
	uint32_t        lck_ticket_owner;       /* owning-thread identifier (backs lck_ticket_assert_owned()) */
} lck_ticket_t;

#else /* !MACH_KERNEL_PRIVATE */

/* Opaque, same size (4 words) as the MACH_KERNEL_PRIVATE layout above. */
typedef struct {
	uint32_t opaque0;
	uint32_t opaque1;
	uint32_t opaque2;
	uint32_t opaque3;
} lck_ticket_t;

#endif /* !MACH_KERNEL_PRIVATE */
#if MACH_KERNEL_PRIVATE

#if !LCK_GRP_USE_ARG
/*
 * When lock groups are compiled out, drop the trailing lck_grp_t *
 * argument from every hw_lck_ticket_* call at the call site.
 */
#define hw_lck_ticket_init(lck, grp)            hw_lck_ticket_init(lck)
#define hw_lck_ticket_init_locked(lck, grp)     hw_lck_ticket_init_locked(lck)
#define hw_lck_ticket_destroy(lck, grp)         hw_lck_ticket_destroy(lck)
#define hw_lck_ticket_lock(lck, grp)            hw_lck_ticket_lock(lck)
#define hw_lck_ticket_lock_nopreempt(lck, grp)  hw_lck_ticket_lock_nopreempt(lck)
#define hw_lck_ticket_lock_to(lck, pol, grp)    hw_lck_ticket_lock_to(lck, pol)
#define hw_lck_ticket_lock_nopreempt_to(lck, pol, grp) \
	hw_lck_ticket_lock_nopreempt_to(lck, pol)
#define hw_lck_ticket_lock_try(lck, grp)        hw_lck_ticket_lock_try(lck)
#define hw_lck_ticket_lock_try_nopreempt(lck, grp) \
	hw_lck_ticket_lock_try_nopreempt(lck)
#define hw_lck_ticket_lock_allow_invalid(lck, pol, grp) \
	hw_lck_ticket_lock_allow_invalid(lck, pol)
#define hw_lck_ticket_reserve(lck, t, grp)      hw_lck_ticket_reserve(lck, t)
#define hw_lck_ticket_reserve_nopreempt(lck, t, grp) \
	hw_lck_ticket_reserve_nopreempt(lck, t)
#define hw_lck_ticket_reserve_allow_invalid(lck, t, grp) \
	hw_lck_ticket_reserve_allow_invalid(lck, t)
#define hw_lck_ticket_wait(lck, ticket, pol, grp) \
	hw_lck_ticket_wait(lck, ticket, pol)
#endif /* !LCK_GRP_USE_ARG */


/* init/destroy */

/*! Initializes @c tlock in the unlocked state. */
extern void hw_lck_ticket_init(
	hw_lck_ticket_t        *tlock,
	lck_grp_t              *grp);

/*! Initializes @c tlock already held by the caller (per its name — see implementation). */
extern void hw_lck_ticket_init_locked(
	hw_lck_ticket_t        *tlock,
	lck_grp_t              *grp);

/*!
 * Destroys @c tlock; for invalidated locks this also synchronizes with
 * reservation holders racing in @c hw_lck_ticket_lock_allow_invalid()
 * (see the @c hw_lck_ticket_t discussion above).
 */
extern void hw_lck_ticket_destroy(
	hw_lck_ticket_t        *tlock,
	lck_grp_t              *grp);

/*! Marks @c tlock invalid so subsequent @c *_allow_invalid() calls fail. */
extern void hw_lck_ticket_invalidate(
	hw_lck_ticket_t        *tlock);

/*! Returns whether @c tlock is currently held (for assertions/debugger use). */
extern bool hw_lck_ticket_held(
	hw_lck_ticket_t        *tlock) __result_use_check;


/* lock */

extern void hw_lck_ticket_lock(
	hw_lck_ticket_t        *tlock,
	lck_grp_t              *grp);

/*! Like @c hw_lck_ticket_lock() but leaves preemption disabled-state management to the caller. */
extern void hw_lck_ticket_lock_nopreempt(
	hw_lck_ticket_t        *tlock,
	lck_grp_t              *grp);

/*! Lock with a spin @c policy (e.g. timeout); returns an @c hw_lock_status_t. */
extern hw_lock_status_t hw_lck_ticket_lock_to(
	hw_lck_ticket_t        *tlock,
	hw_spin_policy_t        policy,
	lck_grp_t              *grp);

extern hw_lock_status_t hw_lck_ticket_lock_nopreempt_to(
	hw_lck_ticket_t        *tlock,
	hw_spin_policy_t        policy,
	lck_grp_t              *grp);


/* lock_try */

extern bool hw_lck_ticket_lock_try(
	hw_lck_ticket_t        *tlock,
	lck_grp_t              *grp) __result_use_check;

extern bool hw_lck_ticket_lock_try_nopreempt(
	hw_lck_ticket_t        *tlock,
	lck_grp_t              *grp) __result_use_check;


/* unlock */

extern void hw_lck_ticket_unlock(
	hw_lck_ticket_t        *tlock);

extern void hw_lck_ticket_unlock_nopreempt(
	hw_lck_ticket_t        *tlock);


/* reserve/wait */

/*!
 * Pre-reserves a ticket; returns @c true when the lock was acquired
 * immediately, else @c false and the caller must @c hw_lck_ticket_wait()
 * on @c *ticket (see the lock-inversion pattern documented above).
 */
extern bool hw_lck_ticket_reserve(
	hw_lck_ticket_t        *tlock,
	uint32_t               *ticket,
	lck_grp_t              *grp) __result_use_check;

extern bool hw_lck_ticket_reserve_nopreempt(
	hw_lck_ticket_t        *tlock,
	uint32_t               *ticket,
	lck_grp_t              *grp) __result_use_check;

/*! Reservation variant that may fail if @c tlock was invalidated. */
extern hw_lock_status_t hw_lck_ticket_reserve_allow_invalid(
	hw_lck_ticket_t        *tlock,
	uint32_t               *ticket,
	lck_grp_t              *grp) __result_use_check;

/*! Waits for a ticket obtained from one of the @c *_reserve() calls. */
extern hw_lock_status_t hw_lck_ticket_wait(
	hw_lck_ticket_t        *tlock,
	uint32_t                ticket,
	hw_spin_policy_t        policy,
	lck_grp_t              *grp);

/*!
 * Lock variant that fails (rather than acquires) once
 * @c hw_lck_ticket_invalidate() has been called on @c tlock;
 * used for sequestering-based reclamation schemes.
 */
extern hw_lock_status_t hw_lck_ticket_lock_allow_invalid(
	hw_lck_ticket_t        *tlock,
	hw_spin_policy_t        policy,
	lck_grp_t              *grp);

/* pv */

/* Paravirtualized-guest support (lck_is_pv): kick/wait on the hypervisor. */
extern void hw_lck_ticket_unlock_kick_pv(
	hw_lck_ticket_t        *tlock,
	uint8_t                 value);

extern void hw_lck_ticket_lock_wait_pv(
	hw_lck_ticket_t        *tlock,
	uint8_t                 value);

#endif /* MACH_KERNEL_PRIVATE */
#if XNU_KERNEL_PRIVATE

/*! Debugger (kdp) check: is @c tlock held? Must not take the lock. */
extern bool kdp_lck_ticket_is_acquired(
	lck_ticket_t           *tlock) __result_use_check;

extern void lck_ticket_lock_nopreempt(
	lck_ticket_t           *tlock,
	lck_grp_t              *grp);

extern bool lck_ticket_lock_try(
	lck_ticket_t           *tlock,
	lck_grp_t              *grp) __result_use_check;

extern bool lck_ticket_lock_try_nopreempt(
	lck_ticket_t           *tlock,
	lck_grp_t              *grp) __result_use_check;

extern void lck_ticket_unlock_nopreempt(
	lck_ticket_t           *tlock);

#endif /* XNU_KERNEL_PRIVATE */

extern __exported void lck_ticket_init(
	lck_ticket_t           *tlock,
	lck_grp_t              *grp);

extern __exported void lck_ticket_destroy(
	lck_ticket_t           *tlock,
	lck_grp_t              *grp);

extern __exported void lck_ticket_lock(
	lck_ticket_t           *tlock,
	lck_grp_t              *grp);

extern __exported void lck_ticket_unlock(
	lck_ticket_t           *tlock);

extern __exported void lck_ticket_assert_owned(
	const lck_ticket_t     *tlock);

extern __exported void lck_ticket_assert_not_owned(
	const lck_ticket_t     *tlock);

/* Ownership assertions, compiled in only when MACH_ASSERT is enabled. */
#define LCK_TICKET_ASSERT_OWNED(tlock) \
	MACH_ASSERT_DO(lck_ticket_assert_owned(tlock))
#define LCK_TICKET_ASSERT_NOT_OWNED(tlock) \
	MACH_ASSERT_DO(lck_ticket_assert_not_owned(tlock))

#pragma GCC visibility pop
__END_DECLS

#endif /* __ASSEMBLER__ */
#if XNU_KERNEL_PRIVATE

/* Bit index of lck_valid within lck_value (lck_type occupies bits 0-7). */
#define HW_LCK_TICKET_LOCK_VALID_BIT    8

#if CONFIG_PV_TICKET

/*
 * For the PV case, the lsbit of cticket is treated as a wait flag,
 * and the ticket counters are incremented by 2
 */
#define HW_LCK_TICKET_LOCK_PVWAITFLAG   ((uint8_t)1)
#define HW_LCK_TICKET_LOCK_INCREMENT    ((uint8_t)2)
/* HW_LCK_TICKET_LOCK_INCREMENT shifted to nticket's byte within lck_value */
#define HW_LCK_TICKET_LOCK_INC_WORD     0x02000000

#if !defined(__ASSEMBLER__) && (DEBUG || DEVELOPMENT)
/* counters for sysctls */
SCALABLE_COUNTER_DECLARE(ticket_wflag_cleared);
SCALABLE_COUNTER_DECLARE(ticket_wflag_still);
SCALABLE_COUNTER_DECLARE(ticket_just_unlock);
SCALABLE_COUNTER_DECLARE(ticket_kick_count);
SCALABLE_COUNTER_DECLARE(ticket_wait_count);
SCALABLE_COUNTER_DECLARE(ticket_already_count);
SCALABLE_COUNTER_DECLARE(ticket_spin_count);
#define PVTICKET_STATS_ADD(var, i) counter_add_preemption_disabled(&ticket_##var, (i))
#define PVTICKET_STATS_INC(var)    counter_inc_preemption_disabled(&ticket_##var)
#else
#define PVTICKET_STATS_ADD(var, i) /* empty */
#define PVTICKET_STATS_INC(var)    /* empty */
#endif

#else /* CONFIG_PV_TICKET */

#define HW_LCK_TICKET_LOCK_PVWAITFLAG   ((uint8_t)0)
#define HW_LCK_TICKET_LOCK_INCREMENT    ((uint8_t)1)
#define HW_LCK_TICKET_LOCK_INC_WORD     0x01000000

#endif /* CONFIG_PV_TICKET */
#endif /* XNU_KERNEL_PRIVATE */
#endif /* _KERN_TICKET_LOCK_H_ */