1 /* 2 * Copyright (c) 2021 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29 #ifndef _KERN_TICKET_LOCK_H_ 30 #define _KERN_TICKET_LOCK_H_ 31 32 #ifndef __ASSEMBLER__ 33 #include <kern/lock_types.h> 34 #include <kern/lock_group.h> 35 #if XNU_KERNEL_PRIVATE 36 #include <kern/counter.h> 37 #endif /* XNU_KERNEL_PRIVATE */ 38 #endif /* __ASSEMBLER__ */ 39 40 #ifndef __ASSEMBLER__ 41 42 __BEGIN_DECLS 43 #pragma GCC visibility push(hidden) 44 45 #ifdef MACH_KERNEL_PRIVATE 46 47 /*! 48 * @typedef hw_lck_ticket_t 49 * 50 * @discussion 51 * This type describes the low level type for a ticket lock. 
 * @c lck_ticket_t provides a higher level abstraction
 * that also provides thread ownership information.
 *
 * This is a low level lock meant to be part of data structures
 * that are very constrained on space, or is part of a larger lock.
 *
 * This lower level interface supports an @c *_allow_invalid()
 * to implement advanced memory reclamation schemes using sequestering.
 * Do note that when @c CONFIG_PROB_GZALLOC is engaged, and the target lock
 * comes from a zone, PGZ must be handled manually.
 * See ipc_object_lock_allow_invalid() for an example of that.
 *
 * @c hw_lck_ticket_invalidate() must be used on locks
 * that will be used this way: in addition to making subsequent calls to
 * @c hw_lck_ticket_lock_allow_invalid() fail, it allows for
 * @c hw_lck_ticket_destroy() to synchronize with callers to
 * @c hw_lck_ticket_lock_allow_invalid() who successfully reserved
 * a ticket but will fail, ensuring the memory can't be freed too early.
 *
 *
 * @c hw_lck_ticket_reserve() can be used to pre-reserve a ticket.
 * When this function returns @c true, then the lock was acquired.
 * When it returns @c false, then @c hw_lck_ticket_wait() must
 * be called to wait for this ticket.
 *
 * This can be used to resolve certain lock inversions: assuming
 * two locks, @c L (a mutex or any kind of lock) and @c T (a ticket lock),
 * where @c L can be taken when @c T is held but not the other way around,
 * then the following can be done to take both locks in "the wrong order",
 * with a guarantee of forward progress:
 *
 * <code>
 *     // starts with L held
 *     uint32_t ticket;
 *
 *     if (!hw_lck_ticket_reserve(T, &ticket)) {
 *         unlock(L);
 *         hw_lck_ticket_wait(T, ticket);
 *         lock(L);
 *         // possibly validate what might have changed
 *         // due to dropping L
 *     }
 *
 *     // both L and T are held
 * </code>
 *
 * This pattern above is safe even for a case when the protected
 * resource contains the ticket lock @c T, provided that it is
 * guaranteed that both @c T and @c L (in the proper order) will
 * be taken before that resource death. In that case, in the resource
 * destructor, when @c hw_lck_ticket_destroy() is called, it will
 * wait for the reservation to be released.
 *
 * See @c waitq_pull_thread_locked() for an example of this where:
 * - @c L is the thread lock of a thread waiting on a given waitq,
 * - @c T is the lock for that waitq,
 * - the waitq can't be destroyed before the thread is unhooked from it,
 *   which happens under both @c L and @c T.
 *
 *
 * @note:
 * At the moment, this construct only supports up to 255 CPUs.
 * Supporting more CPUs requires losing the `lck_type` field,
 * and burning the low bit of the cticket/nticket
 * for the "invalidation" feature.
 */
typedef union {
	struct {
		uint8_t  lck_type;
		uint8_t  lck_valid : 1;
		uint8_t  lck_is_pv : 1;
		uint8_t  lck_unused : 6;
		union {
			struct {
				uint8_t  cticket;
				uint8_t  nticket;
			};
			uint16_t tcurnext;
		};
	};
	uint32_t lck_value;
} hw_lck_ticket_t;

/*!
136 * @typedef lck_ticket_t 137 * 138 * @discussion 139 * A higher level construct than hw_lck_ticket_t in 2 words 140 * like other kernel locks, which admits thread ownership information. 141 */ 142 typedef struct { 143 uint32_t __lck_ticket_unused : 24; 144 uint32_t lck_ticket_type : 8; 145 uint32_t lck_ticket_padding; 146 hw_lck_ticket_t tu; 147 uint32_t lck_ticket_owner; 148 } lck_ticket_t; 149 150 #else /* !MACH_KERNEL_PRIVATE */ 151 152 typedef struct { 153 uint32_t opaque0; 154 uint32_t opaque1; 155 uint32_t opaque2; 156 uint32_t opaque3; 157 } lck_ticket_t; 158 159 #endif /* !MACH_KERNEL_PRIVATE */ 160 #if MACH_KERNEL_PRIVATE 161 162 #if !LCK_GRP_USE_ARG 163 #define hw_lck_ticket_init(lck, grp) hw_lck_ticket_init(lck) 164 #define hw_lck_ticket_init_locked(lck, grp) hw_lck_ticket_init_locked(lck) 165 #define hw_lck_ticket_destroy(lck, grp) hw_lck_ticket_destroy(lck) 166 #define hw_lck_ticket_lock(lck, grp) hw_lck_ticket_lock(lck) 167 #define hw_lck_ticket_lock_nopreempt(lck, grp) hw_lck_ticket_lock_nopreempt(lck) 168 #define hw_lck_ticket_lock_to(lck, pol, grp) hw_lck_ticket_lock_to(lck, pol) 169 #define hw_lck_ticket_lock_nopreempt_to(lck, pol, grp) \ 170 hw_lck_ticket_lock_nopreempt_to(lck, pol) 171 #define hw_lck_ticket_lock_try(lck, grp) hw_lck_ticket_lock_try(lck) 172 #define hw_lck_ticket_lock_try_nopreempt(lck, grp) \ 173 hw_lck_ticket_lock_try_nopreempt(lck) 174 #define hw_lck_ticket_lock_allow_invalid(lck, pol, grp) \ 175 hw_lck_ticket_lock_allow_invalid(lck, pol) 176 #define hw_lck_ticket_reserve(lck, t, grp) hw_lck_ticket_reserve(lck, t) 177 #define hw_lck_ticket_reserve_allow_invalid(lck, t, grp) \ 178 hw_lck_ticket_reserve_allow_invalid(lck, t) 179 #define hw_lck_ticket_wait(lck, ticket, pol, grp) \ 180 hw_lck_ticket_wait(lck, ticket, pol) 181 #endif /* !LCK_GRP_USE_ARG */ 182 183 184 /* init/destroy */ 185 186 extern void hw_lck_ticket_init( 187 hw_lck_ticket_t *tlock, 188 lck_grp_t *grp); 189 190 extern void hw_lck_ticket_init_locked( 191 
hw_lck_ticket_t *tlock, 192 lck_grp_t *grp); 193 194 extern void hw_lck_ticket_destroy( 195 hw_lck_ticket_t *tlock, 196 lck_grp_t *grp); 197 198 extern void hw_lck_ticket_invalidate( 199 hw_lck_ticket_t *tlock); 200 201 extern bool hw_lck_ticket_held( 202 hw_lck_ticket_t *tlock) __result_use_check; 203 204 205 /* lock */ 206 207 extern void hw_lck_ticket_lock( 208 hw_lck_ticket_t *tlock, 209 lck_grp_t *grp); 210 211 extern void hw_lck_ticket_lock_nopreempt( 212 hw_lck_ticket_t *tlock, 213 lck_grp_t *grp); 214 215 extern hw_lock_status_t hw_lck_ticket_lock_to( 216 hw_lck_ticket_t *tlock, 217 hw_spin_policy_t policy, 218 lck_grp_t *grp); 219 220 extern hw_lock_status_t hw_lck_ticket_lock_nopreempt_to( 221 hw_lck_ticket_t *tlock, 222 hw_spin_policy_t policy, 223 lck_grp_t *grp); 224 225 226 /* lock_try */ 227 228 extern bool hw_lck_ticket_lock_try( 229 hw_lck_ticket_t *tlock, 230 lck_grp_t *grp) __result_use_check; 231 232 extern bool hw_lck_ticket_lock_try_nopreempt( 233 hw_lck_ticket_t *tlock, 234 lck_grp_t *grp) __result_use_check; 235 236 237 /* unlock */ 238 239 extern void hw_lck_ticket_unlock( 240 hw_lck_ticket_t *tlock); 241 242 extern void hw_lck_ticket_unlock_nopreempt( 243 hw_lck_ticket_t *tlock); 244 245 246 /* reserve/wait */ 247 248 extern bool hw_lck_ticket_reserve( 249 hw_lck_ticket_t *tlock, 250 uint32_t *ticket, 251 lck_grp_t *grp) __result_use_check; 252 253 extern hw_lock_status_t hw_lck_ticket_reserve_allow_invalid( 254 hw_lck_ticket_t *tlock, 255 uint32_t *ticket, 256 lck_grp_t *grp) __result_use_check; 257 258 extern hw_lock_status_t hw_lck_ticket_wait( 259 hw_lck_ticket_t *tlock, 260 uint32_t ticket, 261 hw_spin_policy_t policy, 262 lck_grp_t *grp); 263 264 extern hw_lock_status_t hw_lck_ticket_lock_allow_invalid( 265 hw_lck_ticket_t *tlock, 266 hw_spin_policy_t policy, 267 lck_grp_t *grp); 268 269 /* pv */ 270 271 extern void hw_lck_ticket_unlock_kick_pv( 272 hw_lck_ticket_t *tlock, 273 uint8_t value); 274 275 extern void 
hw_lck_ticket_lock_wait_pv( 276 hw_lck_ticket_t *tlock, 277 uint8_t value); 278 279 #endif /* MACH_KERNEL_PRIVATE */ 280 #if XNU_KERNEL_PRIVATE 281 282 extern bool kdp_lck_ticket_is_acquired( 283 lck_ticket_t *tlock) __result_use_check; 284 285 extern void lck_ticket_lock_nopreempt( 286 lck_ticket_t *tlock, 287 lck_grp_t *grp); 288 289 extern bool lck_ticket_lock_try( 290 lck_ticket_t *tlock, 291 lck_grp_t *grp) __result_use_check; 292 293 extern bool lck_ticket_lock_try_nopreempt( 294 lck_ticket_t *tlock, 295 lck_grp_t *grp) __result_use_check; 296 297 extern void lck_ticket_unlock_nopreempt( 298 lck_ticket_t *tlock); 299 300 #endif /* XNU_KERNEL_PRIVATE */ 301 302 extern __exported void lck_ticket_init( 303 lck_ticket_t *tlock, 304 lck_grp_t *grp); 305 306 extern __exported void lck_ticket_destroy( 307 lck_ticket_t *tlock, 308 lck_grp_t *grp); 309 310 extern __exported void lck_ticket_lock( 311 lck_ticket_t *tlock, 312 lck_grp_t *grp); 313 314 extern __exported void lck_ticket_unlock( 315 lck_ticket_t *tlock); 316 317 extern __exported void lck_ticket_assert_owned( 318 lck_ticket_t *tlock); 319 320 extern __exported void lck_ticket_assert_not_owned( 321 lck_ticket_t *tlock); 322 323 #if MACH_ASSERT 324 #define LCK_TICKET_ASSERT_OWNED(tlock) lck_ticket_assert_owned(tlock) 325 #define LCK_TICKET_ASSERT_NOT_OWNED(tlock) lck_ticket_assert_owned(tlock) 326 #else 327 #define LCK_TICKET_ASSERT_OWNED(tlock) (void)(tlock) 328 #define LCK_TICKET_ASSERT_NOT_OWNED(tlock) (void)(tlock) 329 #endif 330 331 #pragma GCC visibility pop 332 __END_DECLS 333 334 #endif /* __ASSEMBLER__ */ 335 #if XNU_KERNEL_PRIVATE 336 337 #define HW_LCK_TICKET_LOCK_VALID_BIT 8 338 339 #if CONFIG_PV_TICKET 340 341 /* 342 * For the PV case, the lsbit of cticket is treated as as wait flag, 343 * and the ticket counters are incremented by 2 344 */ 345 #define HW_LCK_TICKET_LOCK_PVWAITFLAG ((uint8_t)1) 346 #define HW_LCK_TICKET_LOCK_INCREMENT ((uint8_t)2) 347 #define HW_LCK_TICKET_LOCK_INC_WORD 
0x02000000 348 349 #if !defined(__ASSEMBLER__) && (DEBUG || DEVELOPMENT) 350 /* counters for sysctls */ 351 SCALABLE_COUNTER_DECLARE(ticket_wflag_cleared); 352 SCALABLE_COUNTER_DECLARE(ticket_wflag_still); 353 SCALABLE_COUNTER_DECLARE(ticket_just_unlock); 354 SCALABLE_COUNTER_DECLARE(ticket_kick_count); 355 SCALABLE_COUNTER_DECLARE(ticket_wait_count); 356 SCALABLE_COUNTER_DECLARE(ticket_already_count); 357 SCALABLE_COUNTER_DECLARE(ticket_spin_count); 358 #define PVTICKET_STATS_ADD(var, i) counter_add_preemption_disabled(&ticket_##var, (i)) 359 #define PVTICKET_STATS_INC(var) counter_inc_preemption_disabled(&ticket_##var) 360 #else 361 #define PVTICKET_STATS_ADD(var, i) /* empty */ 362 #define PVTICKET_STATS_INC(var) /* empty */ 363 #endif 364 365 #else /* CONFIG_PV_TICKET */ 366 367 #define HW_LCK_TICKET_LOCK_PVWAITFLAG ((uint8_t)0) 368 #define HW_LCK_TICKET_LOCK_INCREMENT ((uint8_t)1) 369 #define HW_LCK_TICKET_LOCK_INC_WORD 0x01000000 370 371 #endif /* CONFIG_PV_TICKET */ 372 #endif /* XNU_KERNEL_PRIVATE */ 373 #endif /* _KERN_TICKET_LOCK_H_ */ 374