/*
 * Copyright (c) 2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/epoch_sync.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/turnstile.h>

#include <os/atomic.h>
#include <os/hash.h>
#include <os/overflow.h>

#include <sys/kdebug.h>

#include <stdint.h>

#define ES_INVALID_ID UINT64_MAX

static LCK_GRP_DECLARE(esync_lckgrp, "esync");
os_refgrp_decl(static, esync_refgrp, "esync", NULL);

typedef struct {
	uint64_t es_id;                 /* Synchronization ID. */
	struct turnstile *es_turnstile; /* Associated turnstile. */
	esync_policy_t es_policy;       /* Determines turnstile policy. */
	lck_spin_t es_lock;             /* Interlock. */
	os_refcnt_t es_refcnt;          /* Reference count for lifecycle. */
	queue_chain_t es_link;          /* Link for hash table. */
} esync_t;
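
/*
 * Lifecycle notes (summarizing the code below): 'es_refcnt' holds one
 * reference for the hash table itself plus one per active user, and
 * 'es_lock' protects the object's fields and serializes lookups against
 * teardown. When the last user drops its reference (see esync_put()),
 * 'es_id' is invalidated under 'es_lock' before the object is unlinked
 * and freed.
 */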

#pragma mark - Hash Table Implementation -

static LCK_GRP_DECLARE(ht_lck_grp, "ht");

typedef struct {
	queue_head_t htb_head;
	lck_spin_t htb_lock;
} ht_bucket_t;

typedef struct ht {
	const uint32_t ht_size;
	ht_bucket_t *ht_bucket;
} ht_t;

/*
 * Eventually it would be better to have "clients" just dynamically allocate
 * these as needed, rather than only supporting a few static ID spaces.
 */

#define NBUCKETS_QUEUE 512
static ht_t exclaves_queue_ht = {
	.ht_size = NBUCKETS_QUEUE,
	.ht_bucket = &(ht_bucket_t[NBUCKETS_QUEUE]){}[0],
};

#define NBUCKETS_THREAD 64
static ht_t exclaves_thread_ht = {
	.ht_size = NBUCKETS_THREAD,
	.ht_bucket = &(ht_bucket_t[NBUCKETS_THREAD]){}[0],
};

#if DEVELOPMENT || DEBUG

#define NBUCKETS_TEST 8
static ht_t esync_test_ht = {
	.ht_size = NBUCKETS_TEST,
	.ht_bucket = &(ht_bucket_t[NBUCKETS_TEST]){}[0],
};
#endif /* DEVELOPMENT || DEBUG */

static ht_t *
space_to_ht(esync_space_t space)
{
	switch (space) {
	case ESYNC_SPACE_EXCLAVES_Q:
		return &exclaves_queue_ht;

	case ESYNC_SPACE_EXCLAVES_T:
		return &exclaves_thread_ht;

#if DEVELOPMENT || DEBUG
	case ESYNC_SPACE_TEST:
		return &esync_test_ht;
#endif /* DEVELOPMENT || DEBUG */

	default:
		panic("unknown epoch sync space");
	}
}

static __startup_func void
ht_startup_init(ht_t *ht)
{
	for (uint32_t i = 0; i < ht->ht_size; i++) {
		queue_init(&ht->ht_bucket[i].htb_head);
		lck_spin_init(&ht->ht_bucket[i].htb_lock, &ht_lck_grp, NULL);
	}
}
STARTUP_ARG(LOCKS, STARTUP_RANK_LAST, ht_startup_init, &exclaves_queue_ht);
STARTUP_ARG(LOCKS, STARTUP_RANK_LAST, ht_startup_init, &exclaves_thread_ht);

static inline ht_bucket_t *
ht_get_bucket(ht_t *ht, const uint64_t key)
{
	/* The bucket count must be a power of two for the mask below to work. */
	assert3u((ht->ht_size & (ht->ht_size - 1)), ==, 0);

	const uint32_t idx = os_hash_jenkins(&key, sizeof(key)) & (ht->ht_size - 1);
	return &ht->ht_bucket[idx];
}

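/*
 * Return the entry matching 'key' with its 'es_lock' held, inserting
 * 'new_value' when no live entry exists. If an existing entry is returned,
 * the caller still owns 'new_value' and is responsible for freeing it.
 */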
static esync_t *
ht_put(ht_t *ht, const uint64_t key, esync_t *new_value)
{
	/* 'new_value' shouldn't be part of an existing queue. */
	assert3p(new_value->es_link.next, ==, NULL);
	assert3p(new_value->es_link.prev, ==, NULL);

	ht_bucket_t *bucket = ht_get_bucket(ht, key);

	lck_spin_lock_grp(&bucket->htb_lock, &ht_lck_grp);

	esync_t *value = NULL;
	esync_t *elem = NULL;
	qe_foreach_element(elem, &bucket->htb_head, es_link) {
		if (elem->es_id != key) {
			continue;
		}

		/*
		 * Recheck the ID under 'es_lock': a dying entry has its
		 * 'es_id' invalidated (see esync_put()) before it is removed
		 * from the bucket, so the unlocked match above may be stale.
		 */
		lck_spin_lock_grp(&elem->es_lock, &esync_lckgrp);
		if (elem->es_id == key) {
			value = elem;
			break;
		}
		lck_spin_unlock(&elem->es_lock);
	}

	if (value == NULL) {
		value = new_value;
		lck_spin_lock_grp(&value->es_lock, &esync_lckgrp);
		enqueue(&bucket->htb_head, &value->es_link);
	}

	lck_spin_unlock(&bucket->htb_lock);

	return value;
}

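/*
 * Unlink 'value' from its bucket. The caller (esync_put()) has already
 * invalidated 'value->es_id', so concurrent lookups cannot match it.
 */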
static void
ht_remove(ht_t *ht, const uint64_t key, esync_t *value)
{
	ht_bucket_t *bucket = ht_get_bucket(ht, key);

	lck_spin_lock_grp(&bucket->htb_lock, &ht_lck_grp);
	remqueue(&value->es_link);
	lck_spin_unlock(&bucket->htb_lock);

	assert3p(value->es_link.next, ==, NULL);
	assert3p(value->es_link.prev, ==, NULL);
}

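/*
 * Return the live entry matching 'key' with its 'es_lock' held, or NULL if
 * there is none. Unlike ht_put(), nothing is inserted on a miss.
 */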
static esync_t *
ht_get(ht_t *ht, const uint64_t key)
{
	ht_bucket_t *bucket = ht_get_bucket(ht, key);

	lck_spin_lock_grp(&bucket->htb_lock, &ht_lck_grp);

	esync_t *value = NULL;
	esync_t *elem = NULL;
	qe_foreach_element(elem, &bucket->htb_head, es_link) {
		if (elem->es_id != key) {
			continue;
		}

		/* Recheck under 'es_lock'; see ht_put() for the rationale. */
		lck_spin_lock_grp(&elem->es_lock, &esync_lckgrp);
		if (elem->es_id == key) {
			value = elem;
			break;
		}
		lck_spin_unlock(&elem->es_lock);
	}

	lck_spin_unlock(&bucket->htb_lock);

	return value;
}

#pragma mark - Epoch Sync Implementation -

/*
 * Allocate a backing object.
 */
static esync_t *
esync_alloc(const uint64_t id, const esync_policy_t policy)
{
	assert3u(id, !=, ES_INVALID_ID);

	esync_t *sync = kalloc_type(esync_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	assert3p(sync, !=, NULL);

	sync->es_id = id;
	sync->es_turnstile = TURNSTILE_NULL;
	sync->es_policy = policy;

	lck_spin_init(&sync->es_lock, &esync_lckgrp, NULL);

	os_ref_init_count(&sync->es_refcnt, &esync_refgrp, 1);

	return sync;
}

/*
 * Free a backing object.
 */
static void
esync_free(esync_t *sync)
{
	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_NOTOWNED);
	assert3p(sync->es_turnstile, ==, TURNSTILE_NULL);
	assert3u(os_ref_get_count(&sync->es_refcnt), ==, 0);

	lck_spin_destroy(&sync->es_lock, &esync_lckgrp);

	kfree_type(esync_t, sync);
}

/*
 * Stop using 'sync': drop a reference and, if this was the last user, remove
 * it from the hash table and free it. Also frees 'to_be_freed' (an unused,
 * never-inserted entry) if not NULL.
 * Called with the object locked; returns with it unlocked.
 */
static void
esync_put(ht_t *ht, esync_t *sync, esync_t *to_be_freed)
{
	os_ref_count_t cnt = 0;

	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);

	/* The last owner will remove it from the hash table. */
	cnt = os_ref_get_count(&sync->es_refcnt);
	if (cnt == 2) {
		/*
		 * Make sure no other thread will match it during the window
		 * where the lock is dropped but before it's been removed from
		 * the hash table (lookups are protected by es_lock; see
		 * ht_put()/ht_get()).
		 */
		const uint64_t id = sync->es_id;
		sync->es_id = ES_INVALID_ID;
		lck_spin_unlock(&sync->es_lock);

		ht_remove(ht, id, sync);

		/* Drop the ref associated with the hash table. */
		(void) os_ref_release(&sync->es_refcnt);

		/* Drop the final refcnt and free it. */
		cnt = os_ref_release(&sync->es_refcnt);
		assert3u(cnt, ==, 0);

		/*
		 * Before freeing (and potentially taking another lock), call
		 * turnstile_cleanup().
		 */
		turnstile_cleanup();
		esync_free(sync);
	} else {
		cnt = os_ref_release_locked(&sync->es_refcnt);
		assert3u(cnt, >=, 2);
		lck_spin_unlock(&sync->es_lock);
		turnstile_cleanup();
	}

	/* An unused entry, free it. */
	if (to_be_freed != NULL) {
		cnt = os_ref_release(&to_be_freed->es_refcnt);
		assert3u(cnt, ==, 0);
		esync_free(to_be_freed);
	}
}
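
/*
 * Reference accounting example (illustrative): with a single active waiter,
 * the object holds two references, one for the hash table and one for the
 * waiter. When that waiter calls esync_put(), the count of 2 triggers the
 * teardown path: the ID is invalidated, the entry is unlinked, both
 * references are dropped and the object is freed.
 */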

/*
 * Get the object associated with 'id'. If there isn't one already, allocate
 * one and insert it.
 * Returns with the object locked and a +1 on the refcount.
 */
static esync_t *
esync_get(ht_t *ht, const uint64_t id, const esync_policy_t policy,
    esync_t **const to_be_freed)
{
	esync_t *new = esync_alloc(id, policy);
	esync_t *sync = ht_put(ht, id, new);

	/*
	 * See if the newly allocated entry was inserted. If so, there's
	 * nothing extra to clean up later (if cleanup is needed, it must be
	 * done later, as the spinlock is held at this point).
	 * ht_put() consumes the refcount of 'new' if the entry was inserted.
	 */
	*to_be_freed = (sync != new) ? new : NULL;

	/*
	 * The policy of the sync object should always match, i.e. the
	 * consumer of the esync interfaces must guarantee that all waiters
	 * use the same policy.
	 */
	assert3u(sync->es_policy, ==, policy);

	os_ref_retain_locked(&sync->es_refcnt);

	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);
	return sync;
}

/*
 * Update the epoch counter with a new epoch.
 * Returns true if the new epoch was greater than or equal to the existing
 * epoch (i.e. the epoch was fresh and the counter was updated).
 */
static bool
esync_update_epoch(const uint64_t epoch, os_atomic(uint64_t) *counter)
{
	uint64_t old, new;

	return os_atomic_rmw_loop(counter, old, new, acq_rel, {
		if (old > epoch) {
			os_atomic_rmw_loop_give_up();
		}
		new = epoch;
	}) == 1;
}
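
/*
 * For example (illustrative values): with the counter at 5, an incoming
 * epoch of 4 is stale, so the loop gives up and the function returns false;
 * an incoming epoch of 5 or 7 is fresh, so the counter is set to that value
 * and the function returns true.
 */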

/*
 * Block until esync_wake() is called on this 'id'.
 * The epoch is incremented by the client on wakes. If the epoch is stale,
 * don't block and return immediately.
 * Can allocate a new epoch synchronization object if needed.
 * 'owner_ctid' is only used if the epoch is fresh.
 */
wait_result_t
esync_wait(esync_space_t space, const uint64_t id, const uint64_t epoch,
    os_atomic(uint64_t) *counter, const ctid_t owner_ctid,
    const esync_policy_t policy, const wait_interrupt_t interruptible)
{
	assert3u(id, !=, ES_INVALID_ID);

	ht_t *ht = space_to_ht(space);

	esync_t *to_be_freed = NULL;
	esync_t *sync = esync_get(ht, id, policy, &to_be_freed);

	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);

	const bool fresh_epoch = esync_update_epoch(epoch, counter);
	if (!fresh_epoch) {
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC,
		    MACH_EPOCH_SYNC_WAIT_STALE), space, id, epoch, NULL);
		esync_put(ht, sync, to_be_freed);
		return THREAD_NOT_WAITING;
	}

	/*
	 * It is safe to look up the thread from the ctid here as the lock is
	 * held and the epoch is fresh.
	 */
	thread_t owner_thread = ctid_get_thread(owner_ctid);

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC, MACH_EPOCH_SYNC_WAIT) |
	    DBG_FUNC_START, space, id, epoch,
	    owner_thread != NULL ? thread_tid(owner_thread) : 0);

	assert(sync->es_policy == ESYNC_POLICY_KERNEL ||
	    sync->es_policy == ESYNC_POLICY_USER);
	turnstile_type_t tt = sync->es_policy == ESYNC_POLICY_KERNEL ?
	    TURNSTILE_EPOCH_KERNEL : TURNSTILE_EPOCH_USER;
	struct turnstile *ts = turnstile_prepare((uintptr_t)sync,
	    &sync->es_turnstile, TURNSTILE_NULL, tt);

	/*
	 * 'owner_thread' may not be set; that's fine, the inheritor will be
	 * cleared.
	 */
	turnstile_update_inheritor(ts, owner_thread,
	    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

	wait_result_t wr = waitq_assert_wait64(&ts->ts_waitq,
	    CAST_EVENT64_T(sync), interruptible, TIMEOUT_WAIT_FOREVER);

	lck_spin_unlock(&sync->es_lock);

	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);

	if (wr == THREAD_WAITING) {
		wr = thread_block(THREAD_CONTINUE_NULL);
	}

	lck_spin_lock(&sync->es_lock);

	turnstile_complete((uintptr_t)sync, &sync->es_turnstile, NULL, tt);

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC, MACH_EPOCH_SYNC_WAIT) |
	    DBG_FUNC_END, wr);

	/* Drops the lock and refcount, and possibly frees 'sync'. */
	esync_put(ht, sync, to_be_freed);

	return wr;
}
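
/*
 * Illustrative usage sketch (not a real caller; 'my_id', 'my_epoch',
 * 'my_counter' and 'owner_ctid' are hypothetical):
 *
 *	wait_result_t wr = esync_wait(ESYNC_SPACE_EXCLAVES_Q, my_id,
 *	    my_epoch, &my_counter, owner_ctid, ESYNC_POLICY_KERNEL,
 *	    THREAD_UNINT);
 *
 * If another thread has already advanced the counter past 'my_epoch', this
 * returns THREAD_NOT_WAITING without blocking; otherwise the caller blocks
 * until a matching esync_wake() on 'my_id'.
 */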

/*
 * Wake up a waiter. Pre-posted wakes (wakes which happen when there is no
 * active waiter) just return. The epoch is always updated.
 */
kern_return_t
esync_wake(esync_space_t space, const uint64_t id, const uint64_t epoch,
    os_atomic(uint64_t) *counter, const esync_wake_mode_t mode,
    const ctid_t ctid)
{
	assert3u(id, !=, ES_INVALID_ID);
	assert(
		mode == ESYNC_WAKE_ONE ||
		mode == ESYNC_WAKE_ALL ||
		mode == ESYNC_WAKE_ONE_WITH_OWNER ||
		mode == ESYNC_WAKE_THREAD);

	ht_t *ht = space_to_ht(space);

	kern_return_t kr = KERN_FAILURE;

	/*
	 * Update the epoch regardless of whether there's a waiter or not (if
	 * there's no waiter, there will be no sync object).
	 * The epoch is read by waiters under the object lock to ensure that a
	 * waiter doesn't miss a wake.
	 */
	(void) esync_update_epoch(epoch, counter);

	esync_t *sync = ht_get(ht, id);
	if (sync == NULL) {
		/* Drop pre-posted WAKEs. */
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC,
		    MACH_EPOCH_SYNC_WAKE_NO_WAITERS), space, id,
		    epoch, mode);
		return KERN_NOT_WAITING;
	}
	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);

	os_ref_retain_locked(&sync->es_refcnt);

	assert(sync->es_policy == ESYNC_POLICY_KERNEL ||
	    sync->es_policy == ESYNC_POLICY_USER);
	turnstile_type_t tt = sync->es_policy == ESYNC_POLICY_KERNEL ?
	    TURNSTILE_EPOCH_KERNEL : TURNSTILE_EPOCH_USER;
	struct turnstile *ts = turnstile_prepare((uintptr_t)sync,
	    &sync->es_turnstile, TURNSTILE_NULL, tt);

	switch (mode) {
	case ESYNC_WAKE_ONE:
		/* The woken thread is the new inheritor. */
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC,
		    MACH_EPOCH_SYNC_WAKE_ONE), space, id, epoch);
		kr = waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
		break;

	case ESYNC_WAKE_ALL:
		/* The inheritor is cleared. */
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC,
		    MACH_EPOCH_SYNC_WAKE_ALL), space, id, epoch);
		kr = waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
		break;

	case ESYNC_WAKE_ONE_WITH_OWNER:
		/* The specified thread is the new inheritor (may be NULL). */
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC,
		    MACH_EPOCH_SYNC_WAKE_ONE_WITH_OWNER), space, id, epoch,
		    thread_tid(ctid_get_thread(ctid)));
		kr = waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    THREAD_AWAKENED, WAITQ_WAKEUP_DEFAULT);
		turnstile_update_inheritor(ts, ctid_get_thread(ctid),
		    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
		break;

	case ESYNC_WAKE_THREAD:
		/* No new inheritor. Wake the specified thread (if waiting). */
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EPOCH_SYNC,
		    MACH_EPOCH_SYNC_WAKE_THREAD), space, id, epoch,
		    thread_tid(ctid_get_thread(ctid)));
		kr = waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    ctid_get_thread(ctid), WAITQ_WAKEUP_DEFAULT);
		break;
	}

	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);

	turnstile_complete((uintptr_t)sync, &sync->es_turnstile, NULL, tt);

	/* Drops the lock and refcount, and possibly frees 'sync'. */
	esync_put(ht, sync, NULL);

	assert(kr == KERN_SUCCESS || kr == KERN_NOT_WAITING);
	return kr;
}
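
/*
 * Illustrative wake-side sketch for the wait shown above (same hypothetical
 * names): after making forward progress visible to waiters, a caller might
 * issue:
 *
 *	(void) esync_wake(ESYNC_SPACE_EXCLAVES_Q, my_id, my_epoch,
 *	    &my_counter, ESYNC_WAKE_ONE, 0);
 *
 * The epoch is updated even when no waiter exists, so a later waiter
 * arriving with a stale epoch will not block.
 */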

#if DEVELOPMENT || DEBUG

#pragma mark - Tests -

/* For SYSCTL_TEST_REGISTER. */
#include <kern/startup.h>

/*
 * Delay for a random amount up to ~1/2ms.
 */
static void
random_delay(void)
{
	extern void read_random(void* buffer, u_int numBytes);
	uint64_t random = 0;
	read_random(&random, sizeof(random));
	delay(random % 512);
}

/*
 * Basic mutex-like primitive to test the epoch synchronization primitives.
 */

/* Counter for the "client" side. */
static os_atomic(uint64_t) client_counter = 0;

/* Counter for the "server" side. */
static os_atomic(uint64_t) server_counter = 0;
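
/*
 * In this test, 'client_counter' plays the role of the client-managed epoch
 * (read before waiting, incremented on unlock), while 'server_counter' is
 * the kernel-side counter handed to esync_wait()/esync_wake().
 */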

/*
 * The lock object stores 0 when not held and the owning thread's CTID when
 * held. If there's an active waiter, bit 32 is set.
 */
#define OWNER(x) ((x) & ((1ull << 32) - 1))
#define WAITER_BIT (1ull << 32)
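
/*
 * For example (values are illustrative): a lock held by the thread with
 * ctid 7 and no waiters holds 0x0000000000000007; once a waiter arrives it
 * holds 0x0000000100000007, and OWNER() still extracts 7.
 */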

/* The "mutex" itself. */
static uint64_t test_mutex;

STARTUP_ARG(LOCKS, STARTUP_RANK_LAST, ht_startup_init, &esync_test_ht);

/*
 * Grab the lock.
 * If it's already held, set the waiter bit and call esync_wait().
 * On acquisition, if there are still other waiters, keep the waiter bit set
 * when taking the lock.
 */
static void
test_lock(uint64_t *lock)
{
	/* Counter to keep track of the number of active waiters. */
	static os_atomic(uint32_t) test_waiter_count = 0;

	const ctid_t ctid = thread_get_ctid(current_thread());
	uint64_t old = 0;
	uint64_t new = ctid;

	while (true) {
		/* Try to grab the lock. */
		if (os_atomic_cmpxchgv(lock, 0, new, &old, relaxed) == 1) {
			return;
		}

		/* Failed to grab the lock; set the waiter bit and wait. */
		do {
			uint64_t epoch = os_atomic_load(&client_counter, acquire);

			if (os_atomic_cmpxchgv(lock, old, old | WAITER_BIT, &old, relaxed) == 1) {
				os_atomic_inc(&test_waiter_count, acq_rel);

				random_delay();
				const wait_result_t wr = esync_wait(ESYNC_SPACE_TEST, (uintptr_t)lock, epoch,
				    &server_counter, OWNER(old), ESYNC_POLICY_KERNEL, THREAD_UNINT);
				assert(wr == THREAD_NOT_WAITING || wr == THREAD_AWAKENED);
				random_delay();

				/*
				 * When acquiring the lock, if there are other
				 * waiters, make sure to set the waiter bit.
				 */
				new = ctid;
				if (os_atomic_dec(&test_waiter_count, acq_rel) != 0) {
					new |= WAITER_BIT;
				}
				break;
			}
		} while (old != 0);
	}
}

/*
 * Drop the lock.
 */
static void
test_unlock(uint64_t *lock)
{
	const ctid_t ctid = thread_get_ctid(current_thread());

	/* Drop the lock. */
	uint64_t old = os_atomic_xchg(lock, 0, relaxed);
	assert3u(OWNER(old), ==, ctid);

	uint64_t epoch = os_atomic_inc(&client_counter, release);

	if ((old & WAITER_BIT) != 0) {
		random_delay();
		(void) esync_wake(ESYNC_SPACE_TEST, (uintptr_t)lock, epoch,
		    &server_counter, ESYNC_WAKE_ONE, 0);
		random_delay();
	}
}

/* Count to keep track of completed test threads. */
static os_atomic(uint64_t) test_complete_count = 0;

static void
test_lock_unlock(__unused void *arg, __unused int a)
{
	for (int c = 0; c < 10; c++) {
		test_lock(&test_mutex);
		random_delay();
		test_unlock(&test_mutex);
	}

	os_atomic_inc(&test_complete_count, relaxed);
}

static LCK_MTX_DECLARE(esync_test_mtx, &esync_lckgrp);

/* Spawn 'count' threads which hammer the test mutex, then wait for them. */
static int
esync_test(int64_t count, int64_t *out)
{
	kern_return_t ret;
	thread_t *thread = kalloc_type(thread_t, count,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	printf("%s: STARTING\n", __func__);

	lck_mtx_lock(&esync_test_mtx);

	for (int64_t i = 0; i < count; i++) {
		ret = kernel_thread_start_priority(test_lock_unlock, NULL,
		    BASEPRI_DEFAULT, &thread[i]);
		assert3u(ret, ==, KERN_SUCCESS);
	}

	/* Wait for completion. */
	while (test_complete_count != count) {
		delay(100000);
	}

	os_atomic_store(&test_complete_count, 0, relaxed);

	/* Drop the thread refs. */
	for (int64_t i = 0; i < count; i++) {
		thread_deallocate(thread[i]);
	}

	os_atomic_store(&server_counter, 0, relaxed);
	os_atomic_store(&client_counter, 0, relaxed);

	lck_mtx_unlock(&esync_test_mtx);

	printf("%s: SUCCESS\n", __func__);

	kfree_type(thread_t, count, thread);

	*out = 1;

	return 0;
}

SYSCTL_TEST_REGISTER(esync_test, esync_test);

/*
 * Block the caller on an interruptible wait. The thread must be terminated
 * in order for this test to return.
 */
static int
esync_test_wait(__unused int64_t in, __unused int64_t *out)
{
	os_atomic(uint64_t) counter = 0;

	printf("%s: STARTING\n", __func__);

	wait_result_t wr = esync_wait(ESYNC_SPACE_TEST, 0, 0, &counter, 0,
	    ESYNC_POLICY_USER, THREAD_INTERRUPTIBLE);
	if (wr != THREAD_INTERRUPTED) {
		printf("%s: FAILURE - unexpected wait result (%d)\n", __func__, wr);
		*out = -1;
		return 0;
	}

	printf("%s: SUCCESS\n", __func__);

	*out = 1;

	return 0;
}

SYSCTL_TEST_REGISTER(esync_test_wait, esync_test_wait);

#endif /* DEVELOPMENT || DEBUG */
