/*
 * Copyright (c) 2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/assert.h>
#include <kern/epoch_sync.h>
#include <kern/kalloc.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/turnstile.h>

#include <os/atomic.h>
#include <os/hash.h>
#include <os/overflow.h>

#include <stdint.h>

#define ES_INVALID_ID UINT64_MAX

static LCK_GRP_DECLARE(esync_lckgrp, "esync");
os_refgrp_decl(static, esync_refgrp, "esync", NULL);

typedef struct {
	uint64_t es_id;                 /* Synchronization ID. */
	struct turnstile *es_turnstile; /* Associated turnstile. */
	esync_policy_t es_policy;       /* Determines turnstile policy. */
	lck_spin_t es_lock;             /* Interlock. */
	os_refcnt_t es_refcnt;          /* Reference count for lifecycle. */
	queue_chain_t es_link;          /* Link for hash table. */
} esync_t;
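
/*
 * Objects are created on demand by the first waiter on a given ID and are
 * torn down when the last user drops its reference (see esync_put()). The
 * client supplies both the 64-bit ID and the atomic epoch counter that
 * orders waits against wakes.
 */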

#pragma mark - Hash Table Implementation -

static LCK_GRP_DECLARE(ht_lck_grp, "ht");

typedef struct {
	queue_head_t htb_head;
	lck_spin_t htb_lock;
} ht_bucket_t;

typedef struct ht {
	const uint32_t ht_size;
	ht_bucket_t *ht_bucket;
} ht_t;

/*
 * Eventually it would be better to have "clients" just dynamically allocate
 * these as needed and not only support two static ID spaces.
 */

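/*
 * The bucket arrays below are statically allocated, zero-initialized storage
 * obtained via file-scope compound literals; the queue heads and spinlocks
 * are initialized at startup by ht_startup_init().
 */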
#define NBUCKETS_QUEUE 512
ht_t esync_queue_ht = {
	.ht_size = NBUCKETS_QUEUE,
	.ht_bucket = &(ht_bucket_t[NBUCKETS_QUEUE]){}[0],
};

#define NBUCKETS_THREAD 64
ht_t esync_thread_ht = {
	.ht_size = NBUCKETS_THREAD,
	.ht_bucket = &(ht_bucket_t[NBUCKETS_THREAD]){}[0],
};

static __startup_func void
ht_startup_init(ht_t *ht)
{
	for (uint32_t i = 0; i < ht->ht_size; i++) {
		queue_init(&ht->ht_bucket[i].htb_head);
		lck_spin_init(&ht->ht_bucket[i].htb_lock, &ht_lck_grp, NULL);
	}
}
STARTUP_ARG(LOCKS, STARTUP_RANK_LAST, ht_startup_init, &esync_queue_ht);
STARTUP_ARG(LOCKS, STARTUP_RANK_LAST, ht_startup_init, &esync_thread_ht);

static inline ht_bucket_t *
ht_get_bucket(ht_t *ht, const uint64_t key)
{
	assert3u((ht->ht_size & (ht->ht_size - 1)), ==, 0);

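	/*
	 * ht_size is asserted above to be a power of two, so masking the
	 * Jenkins hash with (ht_size - 1) selects a valid bucket index.
	 */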
	const uint32_t idx = os_hash_jenkins(&key, sizeof(key)) & (ht->ht_size - 1);
	return &ht->ht_bucket[idx];
}

static esync_t *
ht_put(ht_t *ht, const uint64_t key, esync_t *new_value)
{
	/* 'new_value' shouldn't be part of an existing queue. */
	assert3p(new_value->es_link.next, ==, NULL);
	assert3p(new_value->es_link.prev, ==, NULL);

	ht_bucket_t *bucket = ht_get_bucket(ht, key);

	lck_spin_lock_grp(&bucket->htb_lock, &ht_lck_grp);

	esync_t *value = NULL;
	esync_t *elem = NULL;
	qe_foreach_element(elem, &bucket->htb_head, es_link) {
		if (elem->es_id != key) {
			continue;
		}

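		/*
		 * Re-check the ID under the element's interlock: esync_put()
		 * invalidates es_id (while holding es_lock) before unlinking
		 * the element from its bucket, so the unlocked match above
		 * may be stale.
		 */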
		lck_spin_lock_grp(&elem->es_lock, &esync_lckgrp);
		if (elem->es_id == key) {
			value = elem;
			break;
		}
		lck_spin_unlock(&elem->es_lock);
	}

	if (value == NULL) {
		value = new_value;
		lck_spin_lock_grp(&value->es_lock, &esync_lckgrp);
		enqueue(&bucket->htb_head, &value->es_link);
	}

	lck_spin_unlock(&bucket->htb_lock);

	return value;
}

static void
ht_remove(ht_t *ht, const uint64_t key, esync_t *value)
{
	ht_bucket_t *bucket = ht_get_bucket(ht, key);

	lck_spin_lock_grp(&bucket->htb_lock, &ht_lck_grp);
	remqueue(&value->es_link);
	lck_spin_unlock(&bucket->htb_lock);

	assert3p(value->es_link.next, ==, NULL);
	assert3p(value->es_link.prev, ==, NULL);
}

static esync_t *
ht_get(ht_t *ht, const uint64_t key)
{
	ht_bucket_t *bucket = ht_get_bucket(ht, key);

	lck_spin_lock_grp(&bucket->htb_lock, &ht_lck_grp);

	esync_t *value = NULL;
	esync_t *elem = NULL;
	qe_foreach_element(elem, &bucket->htb_head, es_link) {
		if (elem->es_id != key) {
			continue;
		}

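		/* Same stale-ID re-check as in ht_put() above. */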
		lck_spin_lock_grp(&elem->es_lock, &esync_lckgrp);
		if (elem->es_id == key) {
			value = elem;
			break;
		}
		lck_spin_unlock(&elem->es_lock);
	}

	lck_spin_unlock(&bucket->htb_lock);

	return value;
}

#pragma mark - Epoch Sync Implementation -

/*
 * Allocate a backing object.
 */
static esync_t *
esync_alloc(const uint64_t id, const esync_policy_t policy)
{
	assert3u(id, !=, ES_INVALID_ID);

	esync_t *sync = kalloc_type(esync_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	assert3p(sync, !=, NULL);

	sync->es_id = id;
	sync->es_turnstile = TURNSTILE_NULL;
	sync->es_policy = policy;

	lck_spin_init(&sync->es_lock, &esync_lckgrp, NULL);

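	/*
	 * The initial reference is donated to the hash table if this object
	 * wins insertion (see ht_put()/esync_get()); otherwise it is dropped
	 * via esync_put()'s 'to_be_freed' path.
	 */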
	os_ref_init_count(&sync->es_refcnt, &esync_refgrp, 1);

	return sync;
}

/*
 * Free a backing object.
 */
static void
esync_free(esync_t *sync)
{
	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_NOTOWNED);
	assert3p(sync->es_turnstile, ==, TURNSTILE_NULL);
	assert3u(os_ref_get_count(&sync->es_refcnt), ==, 0);

	lck_spin_destroy(&sync->es_lock, &esync_lckgrp);

	kfree_type(esync_t, sync);
}

/*
 * Stop using 'sync'. Drop the ref count and, if this caller was the last
 * user, remove the object from the hash table and free it. Also free the
 * unused entry 'to_be_freed' if it is not NULL.
 * Called with the object locked.
 */
static void
esync_put(ht_t *ht, esync_t *sync, esync_t *to_be_freed)
{
	os_ref_count_t cnt = 0;

	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);

	/*
	 * The last user (refcount == 2: this caller's ref plus the hash
	 * table's) removes the object from the hash table.
	 */
	cnt = os_ref_get_count(&sync->es_refcnt);
	if (cnt == 2) {
		/*
		 * Make sure no other thread will match it during the window
		 * where the lock is dropped but before it's been removed from
		 * the hash table (lookups re-check es_id under es_lock; see
		 * ht_put() and ht_get()).
		 */
		const uint64_t id = sync->es_id;
		sync->es_id = ES_INVALID_ID;
		lck_spin_unlock(&sync->es_lock);

		ht_remove(ht, id, sync);

		/* Drop the ref associated with the hash table. */
		(void) os_ref_release(&sync->es_refcnt);

		/* Drop the final refcnt and free it. */
		cnt = os_ref_release(&sync->es_refcnt);
		assert3u(cnt, ==, 0);

		/*
		 * Before freeing (and potentially taking another lock), call
		 * turnstile_cleanup().
		 */
		turnstile_cleanup();
		esync_free(sync);
	} else {
		cnt = os_ref_release_locked(&sync->es_refcnt);
		assert3u(cnt, >=, 2);
		lck_spin_unlock(&sync->es_lock);
		turnstile_cleanup();
	}

	/* Free the unused entry, if any. */
	if (to_be_freed != NULL) {
		cnt = os_ref_release(&to_be_freed->es_refcnt);
		assert3u(cnt, ==, 0);
		esync_free(to_be_freed);
	}
}

/*
 * Get an object associated with 'id'. If there isn't one already, allocate
 * one and insert it.
 * Returns with the object locked and a +1 on the refcount.
 */
static esync_t *
esync_get(ht_t *ht, const uint64_t id, const esync_policy_t policy,
    esync_t **const to_be_freed)
{
	esync_t *new = esync_alloc(id, policy);
	esync_t *sync = ht_put(ht, id, new);

	/*
	 * See if the newly allocated entry was inserted. If so, then there's
	 * nothing extra to clean up later (if cleanup is needed, it must be
	 * deferred, as the spinlock is held at this point).
	 * ht_put() consumes the refcount of 'new' if the entry was inserted.
	 */
	*to_be_freed = (sync != new) ? new : NULL;

	/*
	 * The policy of the sync object should always match, i.e. the
	 * consumer of the esync interfaces must guarantee that all waiters
	 * use the same policy.
	 */
	assert3u(sync->es_policy, ==, policy);

	os_ref_retain_locked(&sync->es_refcnt);

	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);
	return sync;
}

/*
 * Update the epoch counter with a new epoch.
 * Returns true if the given epoch was newer than or equal to the existing
 * epoch (i.e. the counter was updated).
 */
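/*
 * For example, with the counter at 5: an update with epoch 5 or 7 stores the
 * new value and returns true, while an update with epoch 3 leaves the counter
 * at 5 and returns false (the caller's epoch is stale).
 */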
static bool
esync_update_epoch(const uint64_t epoch, os_atomic(uint64_t) *counter)
{
	uint64_t old, new;

	return os_atomic_rmw_loop(counter, old, new, acq_rel, {
		if (old > epoch) {
			os_atomic_rmw_loop_give_up();
		}
		new = epoch;
	}) == 1;
}

/*
 * Block until esync_wake() is called on this id.
 * The epoch is incremented by the client on wakes. If the epoch is stale,
 * don't block; return immediately.
 * Can allocate a new epoch synchronization object if needed.
 * 'owner_ctid' is only used if the epoch is fresh.
 */
wait_result_t
esync_wait(ht_t *ht, const uint64_t id, const uint64_t epoch,
    os_atomic(uint64_t) *counter, const ctid_t owner_ctid,
    const esync_policy_t policy, const wait_interrupt_t interruptible)
{
	assert3p(ht, !=, NULL);
	assert3u(id, !=, ES_INVALID_ID);

	esync_t *to_be_freed = NULL;
	esync_t *sync = esync_get(ht, id, policy, &to_be_freed);

	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);

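	/*
	 * Publish the caller's epoch. If the counter has already advanced
	 * past it, a wake has happened since the epoch was sampled, so don't
	 * block.
	 */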
	const bool fresh_epoch = esync_update_epoch(epoch, counter);
	if (!fresh_epoch) {
		esync_put(ht, sync, to_be_freed);
		return THREAD_NOT_WAITING;
	}

	assert(sync->es_policy == ESYNC_POLICY_KERNEL ||
	    sync->es_policy == ESYNC_POLICY_USER);
	turnstile_type_t tt = sync->es_policy == ESYNC_POLICY_KERNEL ?
	    TURNSTILE_EPOCH_KERNEL : TURNSTILE_EPOCH_USER;
	struct turnstile *ts = turnstile_prepare((uintptr_t)sync,
	    &sync->es_turnstile, TURNSTILE_NULL, tt);

	/*
	 * owner_ctid may not be set; that's fine, the inheritor will simply
	 * be cleared.
	 */
	thread_t owner_thread = ctid_get_thread(owner_ctid);

	turnstile_update_inheritor(ts, owner_thread,
	    (TURNSTILE_DELAYED_UPDATE | TURNSTILE_INHERITOR_THREAD));

	wait_result_t wr = waitq_assert_wait64(&ts->ts_waitq,
	    CAST_EVENT64_T(sync), interruptible, TIMEOUT_WAIT_FOREVER);

	lck_spin_unlock(&sync->es_lock);

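	/*
	 * Finish the delayed inheritor update (priority propagation) started
	 * above, now that the object's interlock has been dropped.
	 */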
	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);

	if (wr == THREAD_WAITING) {
		wr = thread_block(THREAD_CONTINUE_NULL);
	}

	lck_spin_lock(&sync->es_lock);

	turnstile_complete((uintptr_t)sync, &sync->es_turnstile, NULL, tt);

	/* Drops the lock, refcount and possibly frees sync. */
	esync_put(ht, sync, to_be_freed);

	return wr;
}

/*
 * Wake up a waiter. Pre-posted wakes (wakes that arrive when there is no
 * active waiter) just return. The epoch is always updated.
 */
kern_return_t
esync_wake(ht_t *ht, const uint64_t id, const uint64_t epoch,
    os_atomic(uint64_t) *counter, const esync_wake_mode_t mode,
    const ctid_t ctid)
{
	assert3p(ht, !=, NULL);
	assert3u(id, !=, ES_INVALID_ID);
	assert(
		mode == ESYNC_WAKE_ONE ||
		mode == ESYNC_WAKE_ALL ||
		mode == ESYNC_WAKE_ONE_WITH_OWNER ||
		mode == ESYNC_WAKE_THREAD);

	kern_return_t kr = KERN_FAILURE;

	/*
	 * Update the epoch regardless of whether there's a waiter or not (if
	 * there's no waiter, there will be no sync object).
	 * The epoch is read by waiters under the object lock to ensure that
	 * no wake is missed.
	 */
	(void) esync_update_epoch(epoch, counter);

	esync_t *sync = ht_get(ht, id);
	if (sync == NULL) {
		/* Drop pre-posted WAKEs. */
		return KERN_NOT_WAITING;
	}
	LCK_SPIN_ASSERT(&sync->es_lock, LCK_ASSERT_OWNED);

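	/*
	 * Take a reference for the duration of the wake; esync_put() below
	 * drops it along with the lock.
	 */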
	os_ref_retain_locked(&sync->es_refcnt);

	assert(sync->es_policy == ESYNC_POLICY_KERNEL ||
	    sync->es_policy == ESYNC_POLICY_USER);
	turnstile_type_t tt = sync->es_policy == ESYNC_POLICY_KERNEL ?
	    TURNSTILE_EPOCH_KERNEL : TURNSTILE_EPOCH_USER;
	struct turnstile *ts = turnstile_prepare((uintptr_t)sync,
	    &sync->es_turnstile, TURNSTILE_NULL, tt);

	switch (mode) {
	case ESYNC_WAKE_ONE:
		/* The woken thread is the new inheritor. */
		kr = waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
		break;

	case ESYNC_WAKE_ALL:
		/* The inheritor is cleared. */
		kr = waitq_wakeup64_all(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    THREAD_AWAKENED, WAITQ_UPDATE_INHERITOR);
		break;

	case ESYNC_WAKE_ONE_WITH_OWNER:
		/* The specified thread is the new inheritor (may be NULL). */
		kr = waitq_wakeup64_one(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    THREAD_AWAKENED, WAITQ_WAKEUP_DEFAULT);
		turnstile_update_inheritor(ts, ctid_get_thread(ctid),
		    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
		break;

	case ESYNC_WAKE_THREAD:
		/* No new inheritor. Wake the specified thread (if waiting). */
		kr = waitq_wakeup64_thread(&ts->ts_waitq, CAST_EVENT64_T(sync),
		    ctid_get_thread(ctid), WAITQ_WAKEUP_DEFAULT);
	}

	turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_HELD);

	turnstile_complete((uintptr_t)sync, &sync->es_turnstile, NULL, tt);

	/* Drops the lock, refcount and possibly frees sync. */
	esync_put(ht, sync, NULL);

	assert(kr == KERN_SUCCESS || kr == KERN_NOT_WAITING);
	return kr;
}

#if DEVELOPMENT || DEBUG

#pragma mark - Tests -

/* For SYSCTL_TEST_REGISTER. */
#include <kern/startup.h>

/*
 * Delay for a random amount, up to ~0.5 ms.
 */
static void
random_delay(void)
{
	extern void read_random(void *buffer, u_int numBytes);
	uint64_t random = 0;
	read_random(&random, sizeof(random));
	delay(random % 512);
}

/*
 * Basic mutex-like primitive to test the epoch synchronization primitives.
 */

/* Counter for the "client" side. */
static os_atomic(uint64_t) client_counter = 0;

/* Counter for the "server" side. */
static os_atomic(uint64_t) server_counter = 0;

/*
 * The lock object stores 0 when not held and the owning thread's CTID when
 * held. If there's an active waiter, bit 32 is also set.
 */
#define OWNER(x) ((x) & ((1ull << 32) - 1))
#define WAITER_BIT (1ull << 32)
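/*
 * For example, if the thread with CTID 0x2a holds the lock and another
 * thread is waiting, the lock word is ((1ull << 32) | 0x2a).
 */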

/* The "mutex" itself. */
static uint64_t test_mutex;

#define NBUCKETS_TEST 8
static ht_t esync_test_ht = {
	.ht_size = NBUCKETS_TEST,
	.ht_bucket = &(ht_bucket_t[NBUCKETS_TEST]){}[0],
};
STARTUP_ARG(LOCKS, STARTUP_RANK_LAST, ht_startup_init, &esync_test_ht);

/*
 * Grab the lock.
 * If already held, set a waiters bit and call esync_wait.
 * On acquisition, if there are still waiters, set the waiters bit when taking
 * the lock.
 */
static void
test_lock(uint64_t *lock)
{
	/* Counter to keep track of the number of active waiters. */
	static os_atomic(uint32_t) test_waiter_count = 0;

	const ctid_t ctid = thread_get_ctid(current_thread());
	uint64_t old = 0;
	uint64_t new = ctid;

	while (true) {
		/* Try to grab the lock. */
		if (os_atomic_cmpxchgv(lock, 0, new, &old, relaxed) == 1) {
			return;
		}

		/* Failed to grab the lock, add a waiter bit and wait. */
		do {
			uint64_t epoch = os_atomic_load(&client_counter, acquire);

			if (os_atomic_cmpxchgv(lock, old, old | WAITER_BIT, &old, relaxed) == 1) {
				os_atomic_inc(&test_waiter_count, acq_rel);

				random_delay();
				const wait_result_t wr = esync_wait(&esync_test_ht, (uintptr_t)lock, epoch,
				    &server_counter, OWNER(old), ESYNC_POLICY_KERNEL, THREAD_UNINT);
				assert(wr == THREAD_NOT_WAITING || wr == THREAD_AWAKENED);
				random_delay();

				/*
				 * When acquiring the lock, if there are waiters make sure to
				 * set the waiters bit.
				 */
				new = ctid;
				if (os_atomic_dec(&test_waiter_count, acq_rel) != 0) {
					new |= WAITER_BIT;
				}
				break;
			}
		} while (old != 0);
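
		/*
		 * Either the wait finished, or the lock was released (old
		 * went to 0) before the waiter bit could be set; in both
		 * cases, retry the acquisition from the top.
		 */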
	}
}

/*
 * Drop the lock.
 */
static void
test_unlock(uint64_t *lock)
{
	const ctid_t ctid = thread_get_ctid(current_thread());

	/* Drop the lock. */
	uint64_t old = os_atomic_xchg(lock, 0, relaxed);
	assert3u(OWNER(old), ==, ctid);

	uint64_t epoch = os_atomic_inc(&client_counter, release);

	if ((old & WAITER_BIT) != 0) {
		random_delay();
		(void) esync_wake(&esync_test_ht, (uintptr_t)lock, epoch,
		    &server_counter, ESYNC_WAKE_ONE, 0);
		random_delay();
	}
}

/* Counter to keep track of completed test threads. */
static os_atomic(uint64_t) test_complete_count = 0;

static void
test_lock_unlock(__unused void *arg, __unused int a)
{
	for (int c = 0; c < 10; c++) {
		test_lock(&test_mutex);
		random_delay();
		test_unlock(&test_mutex);
	}

	os_atomic_inc(&test_complete_count, relaxed);
}

static LCK_MTX_DECLARE(esync_test_mtx, &esync_lckgrp);

/* Wait then wake. */
static int
esync_test(int64_t count, int64_t *out)
{
	kern_return_t ret;
	thread_t *thread = kalloc_type(thread_t, count,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	printf("%s: STARTING\n", __func__);

	lck_mtx_lock(&esync_test_mtx);

	for (int64_t i = 0; i < count; i++) {
		ret = kernel_thread_start_priority(test_lock_unlock, NULL,
		    BASEPRI_DEFAULT, &thread[i]);
		assert3u(ret, ==, KERN_SUCCESS);
	}

	/* Wait for completion. */
	while (os_atomic_load(&test_complete_count, relaxed) != (uint64_t)count) {
		delay(100000);
	}

	os_atomic_store(&test_complete_count, 0, relaxed);

	/* Drop the thread refs. */
	for (int64_t i = 0; i < count; i++) {
		thread_deallocate(thread[i]);
	}

	os_atomic_store(&server_counter, 0, relaxed);
	os_atomic_store(&client_counter, 0, relaxed);

	lck_mtx_unlock(&esync_test_mtx);

	printf("%s: SUCCESS\n", __func__);

	kfree_type(thread_t, count, thread);

	*out = 1;

	return 0;
}

SYSCTL_TEST_REGISTER(esync_test, esync_test);

/*
 * Block the caller on an interruptible wait. The thread must be terminated in
 * order for this test to return.
 */
static int
esync_test_wait(__unused int64_t in, __unused int64_t *out)
{
	os_atomic(uint64_t) counter = 0;

	printf("%s: STARTING\n", __func__);

	wait_result_t wr = esync_wait(&esync_test_ht, 0, 0, &counter, 0,
	    ESYNC_POLICY_USER, THREAD_INTERRUPTIBLE);
	if (wr != THREAD_INTERRUPTED) {
		printf("%s: FAILURE - unexpected wait result (%d)\n", __func__, wr);
		*out = -1;
		return 0;
	}

	printf("%s: SUCCESS\n", __func__);

	*out = 1;

	return 0;
}

SYSCTL_TEST_REGISTER(esync_test_wait, esync_test_wait);

#endif /* DEVELOPMENT || DEBUG */
