xref: /xnu-8019.80.24/osfmk/kern/lock_ticket.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
/*
 * Copyright (c) 2018 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#define ATOMIC_PRIVATE 1
#define LOCK_PRIVATE 1

#include <stdint.h>
#include <kern/thread.h>
#include <machine/atomic.h>
#include <kern/locks.h>
#include <kern/lock_stat.h>
#include <machine/machine_cpu.h>
#include <os/atomic_private.h>
#include <vm/pmap.h>

#if defined(__x86_64__)
#include <i386/mp.h>
extern uint64_t LockTimeOutTSC;
#define TICKET_LOCK_PANIC_TIMEOUT LockTimeOutTSC
#define lock_enable_preemption enable_preemption
#endif

#if defined(__arm__) || defined(__arm64__)
extern uint64_t TLockTimeOut;
#define TICKET_LOCK_PANIC_TIMEOUT TLockTimeOut
#endif

/*
 * "Ticket": A FIFO spinlock with constant backoff
 * cf. Algorithms for Scalable Synchronization on Shared-Memory Multiprocessors
 * by Mellor-Crummey and Scott, 1991
 */
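
/*
 * In outline, a simplified sketch of the protocol implemented below (the
 * real code also tracks lck_valid, statistics and timeouts):
 *
 *	lock:    t = fetch_add(&nticket, 1);       // grab the next ticket
 *	         while (cticket != t) { spin; }    // wait for our turn, FIFO
 *	unlock:  cticket++;                        // hand off to the next waiter
 */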

/*
 * TODO: proportional back-off based on desired-current ticket distance.
 * This has the potential to considerably reduce snoop traffic,
 * but must be tuned carefully.
 * TODO: Evaluate a bias towards the performant clusters on
 * asymmetric efficient/performant multi-cluster systems, while
 * retaining the starvation-free property. A small intra-cluster bias may
 * be profitable for overall throughput.
 */
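
/*
 * For illustration only (nothing in this file implements it yet): a
 * proportional back-off would scale the pause with the observed distance
 * to our ticket, along the lines of
 *
 *	while ((cur = cticket) != mt) {
 *		pause_for((uint8_t)(mt - cur) * UNIT);
 *	}
 *
 * where pause_for() and UNIT are hypothetical placeholders for a tuned
 * delay primitive.
 */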

static_assert(sizeof(hw_lck_ticket_t) == 4);
static_assert(offsetof(hw_lck_ticket_t, tcurnext) == 2);
static_assert(offsetof(hw_lck_ticket_t, cticket) == 2);
static_assert(offsetof(hw_lck_ticket_t, nticket) == 3);
static_assert(HW_LCK_TICKET_LOCK_VALID_BIT ==
    (8 * offsetof(hw_lck_ticket_t, lck_valid)));
static_assert(HW_LCK_TICKET_LOCK_INCREMENT ==
    (1u << (8 * offsetof(hw_lck_ticket_t, nticket))));

/*
 * Current ticket size limit: tickets can be trivially expanded
 * to 16 bits if needed
 */
static_assert(MAX_CPUS < 256);
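
/*
 * Byte layout implied by the asserts above (the actual declarations live
 * in the locks headers): lck_type in byte 0, lck_valid in byte 1, cticket
 * in byte 2, nticket in byte 3.  tcurnext aliases the cticket/nticket pair
 * as one 16-bit quantity, and lck_value aliases the whole 32-bit word,
 * which is what lets a ticket be reserved with a single fetch-add (see
 * hw_lck_ticket_reserve_orig() below).
 */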

#if DEVELOPMENT || DEBUG
__abortlike
static void
__hw_lck_invalid_panic(hw_lck_ticket_t *lck)
{
	if (lck->lck_type != LCK_TICKET_TYPE) {
		panic("Invalid ticket lock %p", lck);
	} else {
		panic("Ticket lock destroyed %p", lck);
	}
}
#endif /* DEVELOPMENT || DEBUG */

static inline void
hw_lck_ticket_verify(hw_lck_ticket_t *lck)
{
#if DEVELOPMENT || DEBUG
	if (lck->lck_type != LCK_TICKET_TYPE) {
		__hw_lck_invalid_panic(lck);
	}
#else
	(void)lck;
#endif /* DEVELOPMENT || DEBUG */
}

static inline void
lck_ticket_verify(lck_ticket_t *tlock)
{
	hw_lck_ticket_verify(&tlock->tu);
#if DEVELOPMENT || DEBUG
	if (tlock->lck_tag == LCK_TICKET_TAG_DESTROYED) {
		__hw_lck_invalid_panic(&tlock->tu);
	}
#endif /* DEVELOPMENT || DEBUG */
}

void
hw_lck_ticket_init(hw_lck_ticket_t *lck, lck_grp_t *grp)
{
	assert(((uintptr_t)lck & 3) == 0);
	os_atomic_store(lck, ((hw_lck_ticket_t){
		.lck_type = LCK_TICKET_TYPE,
		.lck_valid = 1,
	}), relaxed);

#if LOCK_STATS
	if (grp) {
		lck_grp_reference(grp);
		lck_grp_lckcnt_incr(grp, LCK_TYPE_TICKET);
	}
#endif /* LOCK_STATS */
}

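/*
 * Initialize the lock already held: nticket starts at 1 against cticket 0
 * (one outstanding reservation) and preemption is left disabled, so the
 * caller is expected to release it with a regular hw_lck_ticket_unlock(),
 * which also re-enables preemption.
 */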
void
hw_lck_ticket_init_locked(hw_lck_ticket_t *lck, lck_grp_t *grp)
{
	assert(((uintptr_t)lck & 3) == 0);

	lock_disable_preemption_for_thread(current_thread());

	os_atomic_store(lck, ((hw_lck_ticket_t){
		.lck_type = LCK_TICKET_TYPE,
		.lck_valid = 1,
		.nticket = 1,
	}), relaxed);

#if LOCK_STATS
	if (grp) {
		lck_grp_reference(grp);
		lck_grp_lckcnt_incr(grp, LCK_TYPE_TICKET);
	}
#endif /* LOCK_STATS */
}

void
lck_ticket_init(lck_ticket_t *tlock, __unused lck_grp_t *grp)
{
	memset(tlock, 0, sizeof(*tlock));
	hw_lck_ticket_init(&tlock->tu, grp);
}

static inline void
hw_lck_ticket_destroy_internal(hw_lck_ticket_t *lck, bool keep_type, bool sync
    LCK_GRP_ARG(lck_grp_t *grp))
{
	__assert_only hw_lck_ticket_t tmp;

	if (keep_type) {
		/*
		 * Wait queues still need the lock to stay functional after
		 * they "destroy" it, so those keep their "type",
		 * and are just invalidated.
		 */
		os_atomic_store(&lck->lck_valid, (uint8_t)0, relaxed);
	}

	tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed);

	if (__improbable(sync && !tmp.lck_valid && tmp.nticket != tmp.cticket)) {
		/*
		 * The lock has been invalidated and there are pending
		 * reservations: hw_lck_ticket_lock_allow_invalid() is possibly
		 * being used, and since such callers do not guarantee the
		 * liveness of the object they try to lock, we need to flush
		 * their reservations before proceeding.
		 *
		 * Because the lock is FIFO, going through one cycle of
		 * locking/unlocking has this effect: the lock is now invalid,
		 * so new calls to hw_lck_ticket_lock_allow_invalid() will fail
		 * before taking a reservation, and we can safely destroy the
		 * lock.
		 */
		hw_lck_ticket_lock(lck, grp);
		hw_lck_ticket_unlock(lck);
	}

	if (!keep_type) {
		os_atomic_store(&lck->lck_value, 0U, relaxed);
	}

#if LOCK_STATS
	if (grp) {
		lck_grp_lckcnt_decr(grp, LCK_TYPE_TICKET);
		lck_grp_deallocate(grp);
	}
#endif /* LOCK_STATS */
}

void
hw_lck_ticket_destroy(hw_lck_ticket_t *lck, bool keep_type, lck_grp_t *grp)
{
	hw_lck_ticket_verify(lck);
	hw_lck_ticket_destroy_internal(lck, keep_type, true LCK_GRP_ARG(grp));
}

void
lck_ticket_destroy(lck_ticket_t *tlock, __unused lck_grp_t *grp)
{
	lck_ticket_verify(tlock);
	assert(tlock->lck_owner == 0);
	tlock->lck_tag = LCK_TICKET_TAG_DESTROYED;
	hw_lck_ticket_destroy_internal(&tlock->tu, true, false LCK_GRP_ARG(grp));
}

bool
hw_lck_ticket_held(hw_lck_ticket_t *lck)
{
	hw_lck_ticket_t tmp;
	tmp.tcurnext = os_atomic_load(&lck->tcurnext, relaxed);
	return tmp.cticket != tmp.nticket;
}

bool
kdp_lck_ticket_is_acquired(lck_ticket_t *lck)
{
	if (not_in_kdp) {
		panic("panic: ticket lock acquired check done outside of kernel debugger");
	}
	return hw_lck_ticket_held(&lck->tu);
}

static inline void
tlock_mark_owned(lck_ticket_t *tlock, thread_t cthread)
{
	/*
	 * There is a small preemption-disabled window (also interrupts masked
	 * for the pset lock) between the acquisition of the lock and the
	 * population of the advisory 'owner' thread field.
	 * On architectures with a DCAS (ARM v8.1 or x86), we could conceivably
	 * populate the next ticket and the thread atomically, at the cost of
	 * possible overhead, potential loss of the micro-architectural forward
	 * progress properties of an unconditional fetch-add, and a 16-byte
	 * alignment requirement.
	 */
	assert(tlock->lck_owner == 0);
	os_atomic_store(&tlock->lck_owner, (uintptr_t)cthread, relaxed);
}

__abortlike
static hw_lock_timeout_status_t
hw_lck_ticket_timeout_panic(void *_lock, uint64_t timeout, uint64_t start, uint64_t now, uint64_t interrupt_time)
{
#pragma unused(interrupt_time)

	lck_spinlock_to_info_t lsti;
	hw_lck_ticket_t *lck = _lock;
	hw_lck_ticket_t tmp;

	lsti = lck_spinlock_timeout_hit(lck, 0);
	tmp.tcurnext = os_atomic_load(&lck->tcurnext, relaxed);

	panic("Ticket spinlock[%p] timeout after %llu ticks; "
	    "cticket: 0x%x, nticket: 0x%x, waiting for 0x%x, "
#if INTERRUPT_MASKED_DEBUG
	    "interrupt time: %llu, "
#endif /* INTERRUPT_MASKED_DEBUG */
	    "start time: %llu, now: %llu, timeout: %llu",
	    lck, now - start, tmp.cticket, tmp.nticket, lsti->extra,
#if INTERRUPT_MASKED_DEBUG
	    interrupt_time,
#endif /* INTERRUPT_MASKED_DEBUG */
	    start, now, timeout);
}

__abortlike
static hw_lock_timeout_status_t
lck_ticket_timeout_panic(void *_lock, uint64_t timeout, uint64_t start, uint64_t now, uint64_t interrupt_time)
{
#pragma unused(interrupt_time)
	lck_spinlock_to_info_t lsti;
	hw_lck_ticket_t *lck = _lock;
	lck_ticket_t *tlock = __container_of(lck, lck_ticket_t, tu);
	hw_lck_ticket_t tmp;

	lsti = lck_spinlock_timeout_hit(lck, tlock->lck_owner);
	tmp.tcurnext = os_atomic_load(&lck->tcurnext, relaxed);

	panic("Ticket spinlock[%p] timeout after %llu ticks; "
	    "cticket: 0x%x, nticket: 0x%x, waiting for 0x%x, "
	    "current owner: %p (on CPU %d), "
#if DEBUG || DEVELOPMENT
	    "orig owner: %p, "
#endif /* DEBUG || DEVELOPMENT */
#if INTERRUPT_MASKED_DEBUG
	    "interrupt time: %llu, "
#endif /* INTERRUPT_MASKED_DEBUG */
	    "start time: %llu, now: %llu, timeout: %llu",
	    tlock, now - start, tmp.cticket, tmp.nticket, lsti->extra,
	    (void *)lsti->owner_thread_cur, lsti->owner_cpu,
#if DEBUG || DEVELOPMENT
	    (void *)lsti->owner_thread_orig,
#endif /* DEBUG || DEVELOPMENT */
#if INTERRUPT_MASKED_DEBUG
	    interrupt_time,
#endif /* INTERRUPT_MASKED_DEBUG */
	    start, now, timeout);
}

static inline void
hw_lck_ticket_unlock_internal(hw_lck_ticket_t *lck)
{
	_Atomic uint8_t *ctp = (_Atomic uint8_t *)&lck->cticket;
	uint8_t cticket;

	/*
	 * Do not use os_atomic* here; we want non-volatile atomics
	 * so that the compiler can codegen an `incb` on Intel.
	 */
	cticket = atomic_load_explicit(ctp, memory_order_relaxed);
	atomic_store_explicit(ctp, cticket + 1, memory_order_release);
#if __arm__
	set_event();
#endif  // __arm__
#if CONFIG_DTRACE
	LOCKSTAT_RECORD(LS_LCK_TICKET_LOCK_RELEASE, lck);
#endif /* CONFIG_DTRACE */
	lock_enable_preemption();
}

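/*
 * Arguments for the contended slow path: `mt` is the ticket we reserved and
 * are waiting to observe in cticket; `validate` is set by
 * hw_lck_ticket_lock_allow_invalid() so that lck_valid is re-checked once
 * the lock has been obtained.
 */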
struct hw_lck_ticket_reserve_arg {
	uint8_t mt;
	bool    validate;
};

/*
 * On contention, poll for ownership
 * Returns when the current ticket is observed equal to "mt"
 */
__result_use_check
static hw_lock_status_t __attribute__((noinline))
hw_lck_ticket_contended(hw_lck_ticket_t *lck, thread_t cthread, struct hw_lck_ticket_reserve_arg arg,
    uint64_t timeout, hw_lock_timeout_handler_t handler LCK_GRP_ARG(lck_grp_t *grp))
{
#pragma unused(cthread)

	uint64_t end = 0, start = 0, interrupts = 0;
	uint64_t default_timeout = TICKET_LOCK_PANIC_TIMEOUT;
	bool     has_timeout = timeout > 0 || default_timeout > 0;

	uint8_t  cticket;
	uint8_t  mt = arg.mt;

#if CONFIG_DTRACE || LOCK_STATS
	uint64_t begin = 0;
	boolean_t stat_enabled = lck_grp_ticket_spin_enabled(lck LCK_GRP_ARG(grp));

	if (__improbable(stat_enabled)) {
		begin = mach_absolute_time();
	}
#endif /* CONFIG_DTRACE || LOCK_STATS */

	if (has_timeout && timeout == 0) {
		timeout = default_timeout;
	}
#if INTERRUPT_MASKED_DEBUG
	bool measure_interrupts = !pmap_in_ppl() && ml_get_interrupts_enabled();
	uint64_t start_interrupts = 0;
#endif /* INTERRUPT_MASKED_DEBUG */
	for (;;) {
		for (int i = 0; i < LOCK_SNOOP_SPINS; i++) {
#if OS_ATOMIC_HAS_LLSC
			cticket = os_atomic_load_exclusive(&lck->cticket, acquire);
			if (__improbable(cticket != mt)) {
				wait_for_event();
				continue;
			}
			os_atomic_clear_exclusive();
#elif defined(__x86_64__)
			__builtin_ia32_pause();
			cticket = os_atomic_load(&lck->cticket, acquire);
			if (__improbable(cticket != mt)) {
				continue;
			}
#else
#error unsupported architecture
#endif

			/*
			 * We now have successfully acquired the lock
			 */

#if CONFIG_DTRACE || LOCK_STATS
			if (__improbable(stat_enabled)) {
				lck_grp_ticket_update_spin(lck LCK_GRP_ARG(grp),
				    mach_absolute_time() - begin);
			}
			lck_grp_ticket_update_miss(lck LCK_GRP_ARG(grp));
			lck_grp_ticket_update_held(lck LCK_GRP_ARG(grp));
#endif /* CONFIG_DTRACE || LOCK_STATS */
			if (__improbable(arg.validate && !lck->lck_valid)) {
				/*
				 * We got the lock, however the caller is
				 * hw_lck_ticket_lock_allow_invalid() and the
				 * lock has been invalidated while we were
				 * waiting for our turn.
				 *
				 * We need to unlock and pretend we failed.
				 */
				hw_lck_ticket_unlock_internal(lck);
				return HW_LOCK_INVALID;
			}

			return HW_LOCK_ACQUIRED;
		}

		if (has_timeout) {
			uint64_t now = ml_get_timebase();
			if (end == 0) {
#if INTERRUPT_MASKED_DEBUG
				if (measure_interrupts) {
					start_interrupts = cthread->machine.int_time_mt;
				}
#endif /* INTERRUPT_MASKED_DEBUG */
				start = now;
				end = now + timeout;
				/* remember the droid we're looking for */
				PERCPU_GET(lck_spinlock_to_info)->extra = mt;
			} else if (now < end) {
				/* keep spinning */
			} else {
#if INTERRUPT_MASKED_DEBUG
				if (measure_interrupts) {
					interrupts = cthread->machine.int_time_mt - start_interrupts;
				}
#endif /* INTERRUPT_MASKED_DEBUG */
				if (handler(lck, timeout, start, now, interrupts)) {
					/* push the deadline */
					end += timeout;
				} else {
					break;
				}
			}
		}
	}

#if CONFIG_DTRACE || LOCK_STATS
	if (__improbable(stat_enabled)) {
		lck_grp_ticket_update_spin(lck LCK_GRP_ARG(grp),
		    mach_absolute_time() - begin);
	}
	lck_grp_ticket_update_miss(lck LCK_GRP_ARG(grp));
#endif /* CONFIG_DTRACE || LOCK_STATS */
	return HW_LOCK_CONTENDED;
}

static void __attribute__((noinline))
lck_ticket_contended(lck_ticket_t *tlock, uint8_t mt, thread_t cthread
    LCK_GRP_ARG(lck_grp_t *grp))
{
	assertf(tlock->lck_owner != (uintptr_t) cthread,
	    "Recursive ticket lock, owner: %p, current thread: %p",
	    (void *) tlock->lck_owner, (void *) cthread);

	struct hw_lck_ticket_reserve_arg arg = { .mt = mt };
	lck_spinlock_timeout_set_orig_owner(tlock->lck_owner);
	(void)hw_lck_ticket_contended(&tlock->tu, cthread, arg, 0,
	    lck_ticket_timeout_panic LCK_GRP_ARG(grp));
	tlock_mark_owned(tlock, cthread);
}

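/*
 * Reserve a ticket with preemption disabled and return the lock value as it
 * was before the increment: when cticket == nticket in that snapshot, the
 * caller owns the lock right away; otherwise nticket is the ticket it must
 * wait for in the contended path.
 */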
static inline hw_lck_ticket_t
hw_lck_ticket_reserve_orig(hw_lck_ticket_t *lck, thread_t cthread __unused)
{
	hw_lck_ticket_t tmp;

	lock_disable_preemption_for_thread(cthread);
	/*
	 * Atomically load both the current and next ticket, and increment the
	 * latter. Wrap of the ticket field is OK as long as the total
	 * number of contending CPUs is < maximum ticket
	 */
	tmp.tcurnext = os_atomic_add_orig(&lck->tcurnext,
	    1U << (8 * sizeof(lck->cticket)), acquire);

	return tmp;
}

void
hw_lck_ticket_lock(hw_lck_ticket_t *lck, lck_grp_t *grp)
{
	thread_t cthread = current_thread();
	hw_lck_ticket_t tmp;

	hw_lck_ticket_verify(lck);
	tmp = hw_lck_ticket_reserve_orig(lck, cthread);

	if (__probable(tmp.cticket == tmp.nticket)) {
		return lck_grp_ticket_update_held(lck LCK_GRP_ARG(grp));
	}

	/* Contention? branch to out of line contended block */
	struct hw_lck_ticket_reserve_arg arg = { .mt = tmp.nticket };
	lck_spinlock_timeout_set_orig_owner(0);
	(void)hw_lck_ticket_contended(lck, cthread, arg, 0,
	    hw_lck_ticket_timeout_panic LCK_GRP_ARG(grp));
}

hw_lock_status_t
hw_lck_ticket_lock_to(hw_lck_ticket_t *lck, uint64_t timeout,
    hw_lock_timeout_handler_t handler, lck_grp_t *grp)
{
	thread_t cthread = current_thread();
	hw_lck_ticket_t tmp;

	hw_lck_ticket_verify(lck);
	tmp = hw_lck_ticket_reserve_orig(lck, cthread);

	if (__probable(tmp.cticket == tmp.nticket)) {
		lck_grp_ticket_update_held(lck LCK_GRP_ARG(grp));
		return HW_LOCK_ACQUIRED;
	}

	/* Contention? branch to out of line contended block */
	struct hw_lck_ticket_reserve_arg arg = { .mt = tmp.nticket };
	lck_spinlock_timeout_set_orig_owner(0);
	return hw_lck_ticket_contended(lck, cthread, arg, timeout,
	           handler LCK_GRP_ARG(grp));
}

void
lck_ticket_lock(lck_ticket_t *tlock, __unused lck_grp_t *grp)
{
	thread_t cthread = current_thread();
	hw_lck_ticket_t tmp;

	lck_ticket_verify(tlock);
	tmp = hw_lck_ticket_reserve_orig(&tlock->tu, cthread);

	if (__probable(tmp.cticket == tmp.nticket)) {
		tlock_mark_owned(tlock, cthread);
		return lck_grp_ticket_update_held(&tlock->tu LCK_GRP_ARG(grp));
	}

	/* Contention? branch to out of line contended block */
	lck_ticket_contended(tlock, tmp.nticket, cthread LCK_GRP_ARG(grp));
}

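/*
 * Trylock variants: they succeed only when the lock is observed free
 * (cticket == nticket); on failure, preemption is re-enabled and no ticket
 * is left reserved.
 */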
int
hw_lck_ticket_lock_try(hw_lck_ticket_t *lck, lck_grp_t *grp)
{
	hw_lck_ticket_t olck, nlck;

	hw_lck_ticket_verify(lck);
	lock_disable_preemption_for_thread(current_thread());

	os_atomic_rmw_loop(&lck->tcurnext, olck.tcurnext, nlck.tcurnext, acquire, {
		if (__improbable(olck.cticket != olck.nticket)) {
		        os_atomic_rmw_loop_give_up({
				lock_enable_preemption();
				return false;
			});
		}
		nlck.cticket = olck.cticket;
		nlck.nticket = olck.nticket + 1;
	});

	lck_grp_ticket_update_held(lck LCK_GRP_ARG(grp));
	return true;
}

int
lck_ticket_lock_try(lck_ticket_t *tlock, __unused lck_grp_t *grp)
{
	thread_t cthread = current_thread();
	hw_lck_ticket_t olck, nlck;

	lck_ticket_verify(tlock);
	lock_disable_preemption_for_thread(cthread);

	os_atomic_rmw_loop(&tlock->tu.tcurnext, olck.tcurnext, nlck.tcurnext, acquire, {
		if (__improbable(olck.cticket != olck.nticket)) {
		        os_atomic_rmw_loop_give_up({
				lock_enable_preemption();
				return false;
			});
		}
		nlck.cticket = olck.cticket;
		nlck.nticket = olck.nticket + 1;
	});

	tlock_mark_owned(tlock, cthread);
	lck_grp_ticket_update_held(&tlock->tu LCK_GRP_ARG(grp));
	return true;
}

/*
 * Returns a "reserved" lock or a lock where `lck_valid` is 0.
 *
 * More or less equivalent to this:
 *
 *	hw_lck_ticket_t
 *	hw_lck_ticket_reserve_orig_allow_invalid(hw_lck_ticket_t *lck)
 *	{
 *		hw_lck_ticket_t o, n;
 *
 *		os_atomic_rmw_loop(lck, o, n, acquire, {
 *			if (__improbable(!o.lck_valid)) {
 *				os_atomic_rmw_loop_give_up({
 *					return (hw_lck_ticket_t){ 0 };
 *				});
 *			}
 *			n = o;
 *			n.nticket++;
 *		});
 *		return o;
 *	}
 */
extern hw_lck_ticket_t
hw_lck_ticket_reserve_orig_allow_invalid(hw_lck_ticket_t *lck);

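/*
 * Lock variant for callers that do not guarantee the liveness of the object
 * being locked (see hw_lck_ticket_destroy_internal() above): returns
 * HW_LOCK_ACQUIRED on success, HW_LOCK_INVALID if the lock was or became
 * invalid, and otherwise whatever the contended path returns
 * (HW_LOCK_CONTENDED when the timeout handler gives up).
 */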
hw_lock_status_t
hw_lck_ticket_lock_allow_invalid(hw_lck_ticket_t *lck, uint64_t timeout,
    hw_lock_timeout_handler_t handler, lck_grp_t *grp)
{
	hw_lck_ticket_t tmp;
	thread_t cthread = current_thread();

	lock_disable_preemption_for_thread(cthread);

	tmp = hw_lck_ticket_reserve_orig_allow_invalid(lck);
	if (__improbable(!tmp.lck_valid)) {
		lock_enable_preemption();
		return HW_LOCK_INVALID;
	}

	if (__probable(tmp.cticket == tmp.nticket)) {
		lck_grp_ticket_update_held(lck LCK_GRP_ARG(grp));
		return HW_LOCK_ACQUIRED;
	}

	/* Contention? branch to out of line contended block */
	struct hw_lck_ticket_reserve_arg arg = {
		.mt = tmp.nticket,
		.validate = true,
	};
	lck_spinlock_timeout_set_orig_owner(0);
	return hw_lck_ticket_contended(lck, cthread, arg, timeout,
	           handler LCK_GRP_ARG(grp));
}

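/*
 * Invalidation only clears lck_valid: the current holder and already-queued
 * waiters still acquire and release normally, but new
 * hw_lck_ticket_lock_allow_invalid() callers will fail with HW_LOCK_INVALID
 * (possibly after briefly taking and dropping the lock in the contended
 * path).
 */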
void
hw_lck_ticket_invalidate(hw_lck_ticket_t *lck)
{
	hw_lck_ticket_t tmp = { .lck_valid = 1 };

	os_atomic_andnot(&lck->lck_value, tmp.lck_value, relaxed);
}

void
hw_lck_ticket_unlock(hw_lck_ticket_t *lck)
{
	hw_lck_ticket_verify(lck);
#if MACH_ASSERT
	hw_lck_ticket_t tmp;
	tmp.lck_value = os_atomic_load(&lck->lck_value, relaxed);
	assertf(tmp.cticket != tmp.nticket,
	    "Ticket lock %p is not locked (0x%08x)", lck, tmp.lck_value);
#endif /* MACH_ASSERT */
	hw_lck_ticket_unlock_internal(lck);
}

void
lck_ticket_unlock(lck_ticket_t *tlock)
{
	lck_ticket_verify(tlock);

	assertf(tlock->lck_owner == (uintptr_t)current_thread(),
	    "Ticket unlock non-owned, owner: %p", (void *) tlock->lck_owner);
	os_atomic_store(&tlock->lck_owner, 0, relaxed);

	hw_lck_ticket_unlock_internal(&tlock->tu);
}

void
lck_ticket_assert_owned(__assert_only lck_ticket_t *tlock)
{
#if MACH_ASSERT
	thread_t self, owner;

	owner = (thread_t)os_atomic_load(&tlock->lck_owner, relaxed);
	self = current_thread();
	assertf(owner == self, "lck_ticket_assert_owned: owner %p, current: %p",
	    owner, self);
#endif /* MACH_ASSERT */
}