1 /*
2 * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56
57 #include <kern/ast.h>
58 #include <kern/backtrace.h>
59 #include <kern/kern_types.h>
60 #include <kern/mach_param.h>
61 #include <kern/percpu.h>
62 #include <kern/queue.h>
63 #include <kern/sched_prim.h>
64 #include <kern/simple_lock.h>
65 #include <kern/spl.h>
66 #include <kern/waitq.h>
67 #include <kern/zalloc.h>
68 #include <kern/policy_internal.h>
69 #include <kern/turnstile.h>
70
71 #include <os/hash.h>
72 #include <libkern/section_keywords.h>
73 #include <mach/sync_policy.h>
74 #include <vm/vm_kern.h>
75
76 #include <sys/kdebug.h>
77
78 /*!
79 * @const waitq_set_unlink_batch
80 *
81 * @brief
82 * How many links are unhooked under a single set lock hold.
83 *
84 * @discussion
85 * Holding a waitq set lock for too long can cause
86 * extreme contention (when a set is being torn down concurrently
87  * with messages being sent to ports that used to belong to that set).
88 *
89 * In order to fight this, large wait queue sets will drop
90 * and reacquire their lock for each unlinking batch.
91 */
92 static TUNABLE(uint32_t, waitq_set_unlink_batch, "waitq_set_unlink_batch", 64);
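/*
 * Illustrative note (an assumption about the generic TUNABLE mechanism, not
 * something specific to this file): the batch size above can typically be
 * overridden at boot with a boot-arg of the same name, e.g.
 *
 *	waitq_set_unlink_batch=128
 */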
93
94 /*!
95 * @const WQL_PREPOST_MARKER
96 *
97 * @brief
98 * Marker set in the @c wql_wqs field of wait queue linkages to denote that
99 * this linkage has preposted to its wait queue set already.
100 *
101 * @discussion
102 * This bit is manipulated under both the wait queue and the wait queue set
103 * locks, and is used for two purposes:
104 *
105  * - for port set queues, it denotes which circle queue the linkage
106 * is queued on (@c waitq_set::wqset_links or @c waitq_set::wqset_preposts)
107 *
108 * - as an optimization during pre-post to not walk sets this link already
109 * preposted to.
110 */
111 #define WQL_PREPOST_MARKER 1ul
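/*
 * Illustrative sketch (not part of the build): linkages store their owning
 * set as a pointer in @c wql_wqs, and those structures are at least
 * pointer-aligned, so bit 0 is free to carry the prepost marker:
 *
 *	bool preposted = (link->wql_wqs & WQL_PREPOST_MARKER) != 0;
 *	wqs = (struct waitq_set *)(link->wql_wqs & ~WQL_PREPOST_MARKER);
 *
 * The accessors below (wql_wqs(), wql_wqs_preposted(), ...) are the only
 * places that should manipulate this encoding.
 */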
112
113 #if __LP64__
114 /*!
115 * @struct waitq_link_hdr
116 *
117 * @brief
118 * Common "header" between all linkages, in order to find the waitq_set
119 * of this linkage.
120 *
121 * @discussion
122 * Due to unfortunate alignment constraints on @c queue_chain_t,
123 * this is wildly different for LP64 and ILP32.
124 *
125  * Do note that `wql_wqs` must be at the same offset in all linkage types
 * (this is enforced by the static asserts below).
126 */
127 struct waitq_link_hdr {
128 uintptr_t wql_wqs;
129 };
130
131 /*!
132 * @struct waitq_sellink
133 *
134 * @brief
135 * Linkages used for select waitq queues to select wait queue sets.
136 *
137 * @discussion
138 * Select linkages are one way (queue to set) for two reasons:
139 *
140 * 1. select doesn't use the wait queue subsystem to discover which file
141 * descriptor woke up the set (it will instead scan all fds again),
142 *
143 * 2. all linkages are unhooked on each syscall return, so we minimize
144 * work to be done to be as quick as possible, using a fast invalidation
145 * scheme based on unique identifiers and sequestering
146 * (see @c select_set_nextid()).
147 */
148 struct waitq_sellink {
149 uintptr_t wql_wqs;
150 struct waitq_link_list_entry wql_next;
151 uint64_t wql_setid;
152 };
153
154 /*!
155 * @struct waitq_link
156 *
157 * @brief
158 * Linkages used for port wait queues and port-set wait queue sets.
159 *
160 * @discussion
161 * Those linkages go both ways so that receiving messages through a port-set
162 * can quickly find ports that preposted to the set.
163 *
164 * It also means that unhooking linkages cannot be lazy.
165 */
166 struct waitq_link {
167 uintptr_t wql_wqs; /**< wait queue set for this link */
168 queue_chain_t wql_qlink; /**< linkage through the waitq list */
169 queue_chain_t wql_slink; /**< linkage through the wqset list */
170 struct waitq *wql_wq; /**< wait queue for this link */
171 };
172 #else
173 struct waitq_link_hdr {
174 uint64_t __wql_padding;
175 uintptr_t wql_wqs;
176 };
177
178 struct waitq_sellink {
179 struct waitq_link_list_entry wql_next;
180 uintptr_t __wql_padding;
181 uintptr_t wql_wqs;
182 uint64_t wql_setid;
183 };
184
185 struct waitq_link {
186 queue_chain_t wql_qlink;
187 uintptr_t wql_wqs;
188 struct waitq *wql_wq;
189 queue_chain_t wql_slink;
190 };
191 #endif
192
193 static_assert(offsetof(struct waitq_link_hdr, wql_wqs) ==
194 offsetof(struct waitq_sellink, wql_wqs));
195 static_assert(offsetof(struct waitq_link_hdr, wql_wqs) ==
196 offsetof(struct waitq_link, wql_wqs));
197 static_assert(sizeof(struct waitq) <= WQ_OPAQUE_SIZE, "waitq structure size mismatch");
198 static_assert(__alignof(struct waitq) == WQ_OPAQUE_ALIGN, "waitq structure alignment mismatch");
199
200 static KALLOC_TYPE_DEFINE(waitq_sellink_zone, struct waitq_sellink, KT_PRIV_ACCT);
201 static KALLOC_TYPE_DEFINE(waitq_link_zone, struct waitq_link, KT_PRIV_ACCT);
202 ZONE_DEFINE_ID(ZONE_ID_SELECT_SET, "select_set", struct select_set,
203 ZC_SEQUESTER | ZC_KASAN_NOQUARANTINE | ZC_ZFREE_CLEARMEM);
204
205 static LCK_GRP_DECLARE(waitq_lck_grp, "waitq");
206
207 static uint64_t PERCPU_DATA(select_setid);
208 struct waitq select_conflict_queue;
209
210 #pragma mark waitq links
211
212 static inline bool
213 waitq_is_sellink(waitq_type_t type)
214 {
215 return type == WQT_SELECT || type == WQT_SELECT_SET;
216 }
217
218 static inline bool
219 wql_sellink_valid(struct select_set *selset, struct waitq_sellink *link)
220 {
221 return waitq_valid(selset) && selset->selset_id == link->wql_setid;
222 }
223
224 static waitq_t
225 wql_wqs(waitq_link_t link)
226 {
227 return (waitq_t){ (void *)(link.wqlh->wql_wqs & ~WQL_PREPOST_MARKER) };
228 }
229
230 static bool
231 wql_wqs_preposted(waitq_link_t link)
232 {
233 return link.wqlh->wql_wqs & WQL_PREPOST_MARKER;
234 }
235
236 static void
237 wql_wqs_mark_preposted(waitq_link_t link)
238 {
239 assert(!wql_wqs_preposted(link));
240 link.wqlh->wql_wqs |= WQL_PREPOST_MARKER;
241 }
242
243 static void
244 wql_wqs_clear_preposted(waitq_link_t link)
245 {
246 assert(wql_wqs_preposted(link));
247 link.wqlh->wql_wqs &= ~WQL_PREPOST_MARKER;
248 }
249
250 static circle_queue_t
251 wql_wqs_queue(struct waitq_set *wqs, struct waitq_link *link)
252 {
253 return wql_wqs_preposted(link) ? &wqs->wqset_preposts : &wqs->wqset_links;
254 }
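/*
 * A minimal sketch of how a linkage migrates between the two circle queues
 * when it preposts (mirroring waitq_link_prepost_locked() below); the names
 * come from this file, the snippet itself is only illustrative:
 *
 *	wql_wqs_mark_preposted(link);
 *	circle_dequeue(&wqset->wqset_links, &link->wql_slink);
 *	circle_enqueue_tail(&wqset->wqset_preposts, &link->wql_slink);
 */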
255
256 static void
257 wql_list_push(waitq_link_list_t *list, waitq_link_t link)
258 {
259 link.wqls->wql_next.next = list->next;
260 list->next = &link.wqls->wql_next;
261 }
262
263 static inline struct waitq_sellink *
264 wql_list_elem(struct waitq_link_list_entry *e)
265 {
266 return e ? __container_of(e, struct waitq_sellink, wql_next) : NULL;
267 }
268
269 /*!
270 * @function wql_list_next()
271 *
272 * @brief
273 * Helper function to implement wait queue link list enumeration.
274 *
275 * @param e in: pointer to the current element,
276 * out: pointer to the next element or NULL
277 * @param end which element to stop enumeration at (NULL for lists,
278 * or the first element enumerated for circle queues).
279 * @returns true (makes writing for(;;) based enumerators easier).
280 */
281 static inline bool
282 wql_list_next(struct waitq_link_list_entry **e, struct waitq_link_list_entry *end)
283 {
284 if (*e == NULL || (*e)->next == end) {
285 *e = NULL;
286 } else {
287 *e = (*e)->next;
288 }
289 return true;
290 }
291
292 #define __wql_list_foreach(it, head, end) \
293 for (struct waitq_link_list_entry *__it = (head)->next, *__end = end; \
294 ((it) = wql_list_elem(__it)); wql_list_next(&__it, __end))
295
296 #define wql_list_foreach(it, head) \
297 __wql_list_foreach(it, head, NULL)
298
299 #define wql_list_foreach_safe(it, head) \
300 for (struct waitq_link_list_entry *__it = (head)->next; \
301 ((it) = wql_list_elem(__it)) && wql_list_next(&__it, NULL); )
302
303 /*
304  * Gross hack: passing `__it` to `__wql_list_foreach` makes it stop when
305  * we either circle back to the first element or reach NULL (whichever comes first).
306  *
307  * This allows a single enumeration function to remain oblivious to whether
308  * we enumerate a circle queue or a sellink list.
309 */
310 #define waitq_link_foreach(link, waitq) \
311 __wql_list_foreach((link).wqls, &(waitq).wq_q->waitq_sellinks, __it)
312
313 static_assert(offsetof(struct waitq, waitq_sellinks) ==
314 offsetof(struct waitq, waitq_links));
315 static_assert(offsetof(struct waitq_sellink, wql_next) ==
316 offsetof(struct waitq_link, wql_qlink.next));
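/*
 * Illustrative usage sketch (assumes the caller already holds the waitq
 * lock, as do_waitq_select_n_locked_sets() does below):
 *
 *	waitq_link_t link;
 *
 *	waitq_link_foreach(link, waitq) {
 *		waitq_t wqs = wql_wqs(link);
 *		// inspect or lock the set this waitq is a member of
 *	}
 */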
317
318 static struct waitq_link *
319 wql_find(struct waitq *waitq, waitq_t wqset)
320 {
321 struct waitq_link *link;
322
323 cqe_foreach_element(link, &waitq->waitq_links, wql_qlink) {
324 if (waitq_same(wql_wqs(link), wqset)) {
325 return link;
326 }
327 }
328
329 return NULL;
330 }
331
332 waitq_link_t
333 waitq_link_alloc(waitq_type_t type)
334 {
335 waitq_link_t link;
336
337 if (waitq_is_sellink(type)) {
338 link.wqls = zalloc_flags(waitq_sellink_zone, Z_WAITOK | Z_ZERO);
339 } else {
340 link.wqll = zalloc_flags(waitq_link_zone, Z_WAITOK | Z_ZERO);
341 }
342 return link;
343 }
344
345 void
346 waitq_link_free(waitq_type_t type, waitq_link_t link)
347 {
348 if (waitq_is_sellink(type)) {
349 return zfree(waitq_sellink_zone, link.wqls);
350 } else {
351 return zfree(waitq_link_zone, link.wqll);
352 }
353 }
354
355 void
356 waitq_link_free_list(waitq_type_t type, waitq_link_list_t *free_l)
357 {
358 waitq_link_t link;
359
360 wql_list_foreach_safe(link.wqls, free_l) {
361 waitq_link_free(type, link);
362 }
363
364 free_l->next = NULL;
365 }
366
367
368 #pragma mark global wait queues
369
370 static __startup_data struct waitq g_boot_waitq;
371 static SECURITY_READ_ONLY_LATE(struct waitq *) global_waitqs = &g_boot_waitq;
372 static SECURITY_READ_ONLY_LATE(uint32_t) g_num_waitqs = 1;
373
374 /*
375 * Zero out the used MSBs of the event.
376 */
377 #define _CAST_TO_EVENT_MASK(event) \
378 ((waitq_flags_t)(event) & ((1ul << _EVENT_MASK_BITS) - 1ul))
379
380 static inline uint32_t
381 waitq_hash(char *key, size_t length)
382 {
383 return os_hash_jenkins(key, length) & (g_num_waitqs - 1);
384 }
385
386 /* return a global waitq pointer corresponding to the given event */
387 struct waitq *
388 _global_eventq(char *event, size_t event_length)
389 {
390 return &global_waitqs[waitq_hash(event, event_length)];
391 }
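/*
 * Illustrative sketch, assuming the usual global_eventq() style of caller
 * that hashes the address of the event being waited on (g_num_waitqs is a
 * power of two, so the mask in waitq_hash() is equivalent to a modulo):
 *
 *	event_t ev = ...;
 *	struct waitq *wq = _global_eventq((char *)&ev, sizeof(ev));
 */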
392
393 bool
394 waitq_is_valid(waitq_t waitq)
395 {
396 return waitq_valid(waitq);
397 }
398
399 static inline bool
400 waitq_is_global(waitq_t waitq)
401 {
402 if (waitq_type(waitq) != WQT_QUEUE) {
403 return false;
404 }
405 return waitq.wq_q >= global_waitqs && waitq.wq_q < global_waitqs + g_num_waitqs;
406 }
407
408 static inline bool
409 waitq_empty(waitq_t wq)
410 {
411 struct turnstile *ts;
412
413 switch (waitq_type(wq)) {
414 case WQT_TURNSTILE:
415 return priority_queue_empty(&wq.wq_q->waitq_prio_queue);
416 case WQT_PORT:
417 ts = wq.wq_q->waitq_ts;
418 return ts == TURNSTILE_NULL ||
419 priority_queue_empty(&ts->ts_waitq.waitq_prio_queue);
420 case WQT_QUEUE:
421 case WQT_SELECT:
422 case WQT_PORT_SET:
423 case WQT_SELECT_SET:
424 return circle_queue_empty(&wq.wq_q->waitq_queue);
425
426 default:
427 return true;
428 }
429 }
430
431 #if CONFIG_WAITQ_STATS
432 #define NWAITQ_BTFRAMES 5
433
434 struct wq_stats {
435 uint64_t waits;
436 uint64_t wakeups;
437 uint64_t clears;
438 uint64_t failed_wakeups;
439
440 uintptr_t last_wait[NWAITQ_BTFRAMES];
441 uintptr_t last_wakeup[NWAITQ_BTFRAMES];
442 uintptr_t last_failed_wakeup[NWAITQ_BTFRAMES];
443 };
444
445 /* this global is for lldb */
446 const uint32_t g_nwaitq_btframes = NWAITQ_BTFRAMES;
447 struct wq_stats g_boot_stats;
448 struct wq_stats *g_waitq_stats = &g_boot_stats;
449
450 static __inline__ void
451 waitq_grab_backtrace(uintptr_t bt[NWAITQ_BTFRAMES], unsigned skip)
452 {
453 uintptr_t buf[NWAITQ_BTFRAMES + skip];
454
455 memset(buf, 0, (NWAITQ_BTFRAMES + skip) * sizeof(uintptr_t));
456 backtrace(buf, g_nwaitq_btframes + skip, NULL, NULL);
457 memcpy(&bt[0], &buf[skip], NWAITQ_BTFRAMES * sizeof(uintptr_t));
458 }
459
460 static __inline__ struct wq_stats *
461 waitq_global_stats(waitq_t waitq)
462 {
463 struct wq_stats *wqs;
464 uint32_t idx;
465
466 if (!waitq_is_global(waitq)) {
467 return NULL;
468 }
469
470 idx = (uint32_t)(waitq.wq_q - global_waitqs);
471 assert(idx < g_num_waitqs);
472 wqs = &g_waitq_stats[idx];
473 return wqs;
474 }
475
476 static __inline__ void
477 waitq_stats_count_wait(waitq_t waitq)
478 {
479 struct wq_stats *wqs = waitq_global_stats(waitq);
480 if (wqs != NULL) {
481 wqs->waits++;
482 waitq_grab_backtrace(wqs->last_wait, 2);
483 }
484 }
485
486 static __inline__ void
487 waitq_stats_count_wakeup(waitq_t waitq, int n)
488 {
489 struct wq_stats *wqs = waitq_global_stats(waitq);
490 if (wqs != NULL) {
491 if (n > 0) {
492 wqs->wakeups += n;
493 waitq_grab_backtrace(wqs->last_wakeup, 2);
494 } else {
495 wqs->failed_wakeups++;
496 waitq_grab_backtrace(wqs->last_failed_wakeup, 2);
497 }
498 }
499 }
500
501 static __inline__ void
502 waitq_stats_count_clear_wakeup(waitq_t waitq)
503 {
504 struct wq_stats *wqs = waitq_global_stats(waitq);
505 if (wqs != NULL) {
506 wqs->wakeups++;
507 wqs->clears++;
508 waitq_grab_backtrace(wqs->last_wakeup, 2);
509 }
510 }
511 #else /* !CONFIG_WAITQ_STATS */
512 #define waitq_stats_count_wait(q) do { } while (0)
513 #define waitq_stats_count_wakeup(q, n) do { } while (0)
514 #define waitq_stats_count_clear_wakeup(q) do { } while (0)
515 #endif
516
517 static struct waitq *
518 waitq_get_safeq(waitq_t waitq)
519 {
520 if (waitq_type(waitq) == WQT_PORT) {
521 struct turnstile *ts = waitq.wq_q->waitq_ts;
522 return ts ? &ts->ts_waitq : NULL;
523 }
524
525 uint32_t hash = os_hash_kernel_pointer(waitq.wq_q);
526 return &global_waitqs[hash & (g_num_waitqs - 1)];
527 }
528
529 /*
530 * Since the priority ordered waitq uses basepri as the
531 * ordering key assert that this value fits in a uint8_t.
532 */
533 static_assert(MAXPRI <= UINT8_MAX);
534
535 static inline void
536 waitq_thread_insert(struct waitq *safeq, thread_t thread,
537 waitq_t wq, event64_t event)
538 {
539 if (waitq_type(safeq) == WQT_TURNSTILE) {
540 turnstile_stats_update(0, TSU_TURNSTILE_BLOCK_COUNT, NULL);
541 turnstile_waitq_add_thread_priority_queue(safeq, thread);
542 } else {
543 turnstile_stats_update(0, TSU_REGULAR_WAITQ_BLOCK_COUNT, NULL);
544 /*
545 * This is the extent to which we currently take scheduling
546 * attributes into account:
547 *
548 * - If the thread is vm privileged, we stick it at the front
549  * of the queue; later, these queues will honor the policy
550 * value set at waitq_init time.
551 *
552 * - Realtime threads get priority for wait queue placements.
553 * This allows wait_queue_wakeup_one to prefer a waiting
554 * realtime thread, similar in principle to performing
555 * a wait_queue_wakeup_all and allowing scheduler
556 * prioritization to run the realtime thread, but without
557 * causing the lock contention of that scenario.
558 */
559 if (thread->sched_pri >= BASEPRI_REALTIME ||
560 !safeq->waitq_fifo ||
561 (thread->options & TH_OPT_VMPRIV)) {
562 circle_enqueue_head(&safeq->waitq_queue, &thread->wait_links);
563 } else {
564 circle_enqueue_tail(&safeq->waitq_queue, &thread->wait_links);
565 }
566 }
567
568 /* mark the event and real waitq, even if enqueued on a global safeq */
569 thread->wait_event = event;
570 thread->waitq = wq;
571 }
572
573 /**
574 * clear the thread-related waitq state
575 *
576 * Conditions:
577 * 'thread' is locked
578 */
579 static inline void
580 thread_clear_waitq_state(thread_t thread)
581 {
582 thread->waitq.wq_q = NULL;
583 thread->wait_event = NO_EVENT64;
584 thread->at_safe_point = FALSE;
585 }
586
587 static inline void
588 waitq_thread_remove(waitq_t wq, thread_t thread)
589 {
590 if (waitq_type(wq) == WQT_TURNSTILE) {
591 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
592 (TURNSTILE_CODE(TURNSTILE_HEAP_OPERATIONS,
593 (THREAD_REMOVED_FROM_TURNSTILE_WAITQ))) | DBG_FUNC_NONE,
594 VM_KERNEL_UNSLIDE_OR_PERM(waitq_to_turnstile(wq.wq_q)),
595 thread_tid(thread), 0, 0, 0);
596 priority_queue_remove(&wq.wq_q->waitq_prio_queue,
597 &thread->wait_prioq_links);
598 } else {
599 circle_dequeue(&wq.wq_q->waitq_queue, &thread->wait_links);
600 if (waitq_is_global(wq) && waitq_empty(wq)) {
601 wq.wq_q->waitq_eventmask = 0;
602 }
603 }
604
605 thread_clear_waitq_state(thread);
606 }
607
608 __startup_func
609 static void
610 waitq_bootstrap(void)
611 {
612 const uint32_t qsz = sizeof(struct waitq);
613 vm_offset_t whsize;
614 int cpu = 0;
615
616 /*
617 * Determine the amount of memory we're willing to reserve for
618 * the waitqueue hash table
619 */
620 if (!PE_parse_boot_argn("wqsize", &whsize, sizeof(whsize))) {
621 whsize = round_page(thread_max * qsz / 5);
622 }
623
624 /*
625 * Determine the number of waitqueues we can fit.
626 * The hash algorithm requires that this be a power of 2.
627 */
628 g_num_waitqs = 0x80000000u >> __builtin_clzl(whsize / qsz);
629 assert(g_num_waitqs > 0);
630 whsize = round_page(g_num_waitqs * qsz);
631
632 kmem_alloc(kernel_map, (vm_offset_t *)&global_waitqs, whsize,
633 KMA_NOFAIL | KMA_KOBJECT | KMA_NOPAGEWAIT | KMA_PERMANENT,
634 VM_KERN_MEMORY_WAITQ);
635
636 #if CONFIG_WAITQ_STATS
637 whsize = round_page(g_num_waitqs * sizeof(struct wq_stats));
638 kmem_alloc(kernel_map, (vm_offset_t *)&g_waitq_stats, whsize,
639 KMA_NOFAIL | KMA_KOBJECT | KMA_NOPAGEWAIT | KMA_ZERO | KMA_PERMANENT,
640 VM_KERN_MEMORY_WAITQ);
641 #endif
642
643 for (uint32_t i = 0; i < g_num_waitqs; i++) {
644 waitq_init(&global_waitqs[i], WQT_QUEUE, SYNC_POLICY_FIFO);
645 }
646
647 waitq_init(&select_conflict_queue, WQT_SELECT, SYNC_POLICY_FIFO);
648
649 percpu_foreach(setid, select_setid) {
650 /* this is not cpu_number(), but CPUs haven't been numbered yet */
651 *setid = cpu++;
652 }
653 }
654 STARTUP(MACH_IPC, STARTUP_RANK_FIRST, waitq_bootstrap);
655
656
657 #pragma mark locking
658
659 /*
660 * Double the standard lock timeout, because wait queues tend
661 * to iterate over a number of threads - locking each. If there is
662 * a problem with a thread lock, it normally times out at the wait
663 * queue level first, hiding the real problem.
664 */
665 /* For x86, the hardware timeout is in TSC units. */
666 #if defined(__i386__) || defined(__x86_64__)
667 #define waitq_timeout (2 * LockTimeOutTSC)
668 #else
669 #define waitq_timeout (2 * os_atomic_load(&LockTimeOut, relaxed))
670 #endif
671
672 static hw_lock_timeout_status_t
673 waitq_timeout_handler(void *_lock, uint64_t timeout,
674 uint64_t start, uint64_t now, uint64_t interrupt_time)
675 {
676 #pragma unused(interrupt_time)
677
678 lck_spinlock_to_info_t lsti;
679 hw_lck_ticket_t *lck = _lock;
680 hw_lck_ticket_t tmp;
681 struct waitq *wq = __container_of(lck, struct waitq, waitq_interlock);
682
683 if (machine_timeout_suspended()) {
684 return HW_LOCK_TIMEOUT_CONTINUE;
685 }
686
687 lsti = lck_spinlock_timeout_hit(lck, 0);
688 tmp.tcurnext = os_atomic_load(&lck->tcurnext, relaxed);
689
690 panic("waitq(%p) lock timeout after %llu ticks; cpu=%d, "
691 "cticket: 0x%x, nticket: 0x%x, waiting for 0x%x, "
692 #if INTERRUPT_MASKED_DEBUG
693 "interrupt time: %llu, "
694 #endif /* INTERRUPT_MASKED_DEBUG */
695 "start time: %llu, now: %llu, timeout: %llu",
696 wq, now - start, cpu_number(),
697 tmp.cticket, tmp.nticket, lsti->extra,
698 #if INTERRUPT_MASKED_DEBUG
699 interrupt_time,
700 #endif /* INTERRUPT_MASKED_DEBUG */
701 start, now, timeout);
702 }
703
704 void
705 waitq_invalidate(waitq_t waitq)
706 {
707 hw_lck_ticket_invalidate(&waitq.wq_q->waitq_interlock);
708 }
709
710 bool
711 waitq_held(waitq_t wq)
712 {
713 return hw_lck_ticket_held(&wq.wq_q->waitq_interlock);
714 }
715
716 void
717 waitq_lock(waitq_t wq)
718 {
719 (void)hw_lck_ticket_lock_to(&wq.wq_q->waitq_interlock,
720 waitq_timeout, waitq_timeout_handler, &waitq_lck_grp);
721 #if defined(__x86_64__)
722 pltrace(FALSE);
723 #endif
724 }
725
726 bool
727 waitq_lock_try(waitq_t wq)
728 {
729 bool rc = hw_lck_ticket_lock_try(&wq.wq_q->waitq_interlock, &waitq_lck_grp);
730
731 #if defined(__x86_64__)
732 if (rc) {
733 pltrace(FALSE);
734 }
735 #endif
736 return rc;
737 }
738
739 bool
740 waitq_lock_reserve(waitq_t wq, uint32_t *ticket)
741 {
742 return hw_lck_ticket_reserve(&wq.wq_q->waitq_interlock, ticket, &waitq_lck_grp);
743 }
744
745 static hw_lock_status_t
746 waitq_lock_reserve_allow_invalid(waitq_t wq, uint32_t *ticket)
747 {
748 return hw_lck_ticket_reserve_allow_invalid(&wq.wq_q->waitq_interlock,
749 ticket, &waitq_lck_grp);
750 }
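/*
 * A minimal sketch of the ticket/reservation pattern these primitives enable
 * (see waitq_pull_thread_locked() and waitq_set_unlink_all_locked() below for
 * the real uses): reserve a slot, drop any lock that would invert with the
 * waitq lock, then wait for the reserved turn.
 *
 *	uint32_t ticket;
 *
 *	if (!waitq_lock_reserve(wq, &ticket)) {
 *		// drop locks that must not be held while waiting
 *		waitq_lock_wait(wq, ticket);
 *		// re-take them and re-validate state
 *	}
 *	...
 *	waitq_unlock(wq);
 */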
751
752 void
753 waitq_lock_wait(waitq_t wq, uint32_t ticket)
754 {
755 (void)hw_lck_ticket_wait(&wq.wq_q->waitq_interlock, ticket,
756 waitq_timeout, waitq_timeout_handler, &waitq_lck_grp);
757 #if defined(__x86_64__)
758 pltrace(FALSE);
759 #endif
760 }
761
762 bool
763 waitq_lock_allow_invalid(waitq_t wq)
764 {
765 hw_lock_status_t rc;
766
767 rc = hw_lck_ticket_lock_allow_invalid(&wq.wq_q->waitq_interlock,
768 waitq_timeout, waitq_timeout_handler, &waitq_lck_grp);
769
770 #if defined(__x86_64__)
771 if (rc == HW_LOCK_ACQUIRED) {
772 pltrace(FALSE);
773 }
774 #endif
775 return rc == HW_LOCK_ACQUIRED;
776 }
777
778 void
779 waitq_unlock(waitq_t wq)
780 {
781 assert(waitq_held(wq));
782 #if defined(__x86_64__)
783 pltrace(TRUE);
784 #endif
785 hw_lck_ticket_unlock(&wq.wq_q->waitq_interlock);
786 }
787
788
789 #pragma mark assert_wait / wakeup
790
791 typedef thread_t (^waitq_select_cb)(struct waitq *waitq, thread_t thread);
792
793 struct waitq_select_args {
794 /* input parameters */
795 event64_t event;
796 waitq_select_cb select_cb;
797 int priority;
798 wait_result_t result;
799 waitq_options_t options;
800
801 /* output parameters */
802 uint32_t max_threads;
803 uint32_t nthreads;
804 spl_t spl;
805 circle_queue_head_t threadq;
806 };
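/*
 * A hedged overview of how these arguments flow (a sketch, not new API): the
 * wakeup entry points below fill in a waitq_select_args,
 * do_waitq_select_n_locked() collects matching threads onto args->threadq
 * (recursing into wait queue sets when the event is NO_EVENT64), and
 * waitq_select_queue_flush() finally calls thread_go() on each selected
 * thread.
 *
 *	struct waitq_select_args args = {
 *		.event       = wake_event,
 *		.result      = THREAD_AWAKENED,
 *		.max_threads = 1,
 *	};
 *
 *	do_waitq_select_n_locked(waitq, &args);
 *	waitq_select_queue_flush(waitq, &args);
 */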
807
808 static inline void
809 maybe_adjust_thread_pri(thread_t thread, int priority,
810 __kdebug_only waitq_t waitq)
811 {
812 /*
813 * If the caller is requesting the waitq subsystem to promote the
814 * priority of the awoken thread, then boost the thread's priority to
815 * the default WAITQ_BOOST_PRIORITY (if it's not already equal or
816 * higher priority). This boost must be removed via a call to
817 * waitq_clear_promotion_locked before the thread waits again.
818 *
819 * WAITQ_PROMOTE_PRIORITY is -2.
820 * Anything above 0 represents a mutex promotion.
821 * The default 'no action' value is -1.
822 * TODO: define this in a header
823 */
824 if (priority == WAITQ_PROMOTE_PRIORITY) {
825 uintptr_t trace_waitq = 0;
826 if (__improbable(kdebug_enable)) {
827 trace_waitq = VM_KERNEL_UNSLIDE_OR_PERM(waitq.wq_q);
828 }
829
830 sched_thread_promote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, trace_waitq);
831 }
832 }
833
834 static void
835 waitq_select_queue_flush(waitq_t waitq, struct waitq_select_args *args)
836 {
837 thread_t thread = THREAD_NULL;
838 __assert_only kern_return_t kr;
839
840 cqe_foreach_element_safe(thread, &args->threadq, wait_links) {
841 circle_dequeue(&args->threadq, &thread->wait_links);
842 maybe_adjust_thread_pri(thread, args->priority, waitq);
843 kr = thread_go(thread, args->result, args->options);
844 assert(kr == KERN_SUCCESS);
845 thread_unlock(thread);
846 }
847 }
848
849 /**
850 * Routine to iterate over the waitq for non-priority ordered waitqs
851 *
852 * Conditions:
853 * args->waitq (and the posted waitq) is locked
854 *
855 * Notes:
856 * Uses the optional select callback function to refine the selection
857 * of one or more threads from a waitq. The select callback is invoked
858 * once for every thread that is found to be waiting on the input args->waitq.
859 *
860 * If one or more threads are selected, this may disable interrupts.
861 * The previous interrupt state is returned in args->spl and should
862 * be used in a call to splx() if threads are returned to the caller.
863 */
864 static thread_t
865 waitq_queue_iterate_locked(struct waitq *safeq, struct waitq *waitq,
866 struct waitq_select_args *args, waitq_flags_t *remaining_eventmask)
867 {
868 thread_t thread = THREAD_NULL;
869 thread_t first_thread = THREAD_NULL;
870
871 cqe_foreach_element_safe(thread, &safeq->waitq_queue, wait_links) {
872 thread_t t = THREAD_NULL;
873 assert_thread_magic(thread);
874
875 /*
876 * For non-priority ordered waitqs, we allow multiple events to be
877 * mux'ed into the same waitq. Also safeqs may contain threads from
878 * multiple waitqs. Only pick threads that match the
879 * requested wait event.
880 */
881 if (waitq_same(thread->waitq, waitq) && thread->wait_event == args->event) {
882 t = thread;
883 if (first_thread == THREAD_NULL) {
884 first_thread = thread;
885 }
886
887 /* allow the caller to further refine the selection */
888 if (args->select_cb) {
889 t = args->select_cb(waitq, thread);
890 }
891 if (t != THREAD_NULL) {
892 args->nthreads += 1;
893 if (args->nthreads == 1 && safeq == waitq) {
894 args->spl = splsched();
895 }
896 thread_lock(t);
897 thread_clear_waitq_state(t);
898 circle_dequeue(&safeq->waitq_queue, &thread->wait_links);
899 circle_enqueue_tail(&args->threadq, &t->wait_links);
900 /* only enqueue up to 'max' threads */
901 if (args->nthreads >= args->max_threads) {
902 break;
903 }
904 }
905 }
906
907 /* thread wasn't selected so track its event */
908 if (t == THREAD_NULL) {
909 *remaining_eventmask |= waitq_same(thread->waitq, safeq)
910 ? _CAST_TO_EVENT_MASK(thread->wait_event)
911 : _CAST_TO_EVENT_MASK(thread->waitq.wq_q);
912 }
913 }
914
915 return first_thread;
916 }
917
918 /**
919 * Routine to iterate and remove threads from priority ordered waitqs
920 *
921 * Conditions:
922 * args->waitq (and the posted waitq) is locked
923 *
924 * Notes:
925 * The priority ordered waitqs only support maximum priority element removal.
926 *
927 * Also, the implementation makes sure that all threads in a priority ordered
928 * waitq are waiting on the same wait event. This is not necessarily true for
929 * non-priority ordered waitqs. If one or more threads are selected, this may
930 * disable interrupts. The previous interrupt state is returned in args->spl
931 * and should be used in a call to splx() if threads are returned to the caller.
932 *
933 * In the future, we could support priority ordered waitqs with multiple wait
934 * events in the same queue. The way to implement that would be to keep removing
935 * elements from the waitq and if the event does not match the requested one,
936 * add it to a local list. This local list of elements needs to be re-inserted
937 * into the priority queue at the end and the select_cb return value &
938 * remaining_eventmask would need to be handled appropriately. The implementation
939 * is not very efficient but would work functionally.
940 */
941 static thread_t
942 waitq_prioq_iterate_locked(struct waitq *safeq, struct waitq *waitq,
943 struct waitq_select_args *args, waitq_flags_t *remaining_eventmask)
944 {
945 thread_t first_thread = THREAD_NULL;
946 thread_t thread = THREAD_NULL;
947
948 /*
949 * The only possible values for remaining_eventmask for the priority queue
950 * waitq are either 0 (for the remove all threads case) or the original
951 * safeq->waitq_eventmask (for the lookup/remove one thread cases).
952 */
953 *remaining_eventmask = safeq->waitq_eventmask;
954
955 while (args->nthreads < args->max_threads) {
956 if (priority_queue_empty(&(safeq->waitq_prio_queue))) {
957 *remaining_eventmask = 0;
958 break;
959 }
960
961 thread = priority_queue_remove_max(&safeq->waitq_prio_queue,
962 struct thread, wait_prioq_links);
963
964 /*
965 * Ensure the wait event matches since priority ordered waitqs do not
966 * support multiple events in the same waitq.
967 */
968 assert(waitq_same(thread->waitq, waitq) && (thread->wait_event == args->event));
969
970 if (args->select_cb) {
971 /*
972 * Call the select_cb passed into the waitq_select args. The callback
973 * updates the select_ctx with information about the highest priority
974 * thread which is eventually used by the caller.
975 */
976 thread_t __assert_only ret_thread = args->select_cb(waitq, thread);
977 assert(ret_thread == thread);
978 }
979
980 if (first_thread == THREAD_NULL) {
981 first_thread = thread;
982 /*
983 * turnstile_kernel_update_inheritor_on_wake_locked will lock
984 * first_thread, so call it before locking it.
985 */
986 if (args->priority == WAITQ_PROMOTE_ON_WAKE &&
987 first_thread != THREAD_NULL &&
988 waitq_type(safeq) == WQT_TURNSTILE) {
989 turnstile_kernel_update_inheritor_on_wake_locked(waitq_to_turnstile(safeq),
990 (turnstile_inheritor_t)first_thread, TURNSTILE_INHERITOR_THREAD);
991 }
992 }
993
994 /* Add the thread to the result thread list */
995 args->nthreads += 1;
996 if (args->nthreads == 1 && safeq == waitq) {
997 args->spl = splsched();
998 }
999 thread_lock(thread);
1000 thread_clear_waitq_state(thread);
1001 circle_enqueue_tail(&args->threadq, &thread->wait_links);
1002 }
1003
1004 return first_thread;
1005 }
1006
1007 /**
1008 * @function do_waitq_select_n_locked_queue
1009 *
1010 * @brief
1011 * Selects threads waiting on a wait queue.
1012 *
1013 * @discussion
1014 * @c waitq is locked.
1015 * If @c waitq is a set, then the wait queue posting to it is locked too.
1016 *
1017 * Uses the optional select callback function to refine the selection
1018 * of one or more threads from a waitq.
1019 *
1020 * The select callback is invoked once for every thread that
1021 * is found to be waiting on the input args->waitq.
1022 *
1023 * If one or more threads are selected, this may disable interrupts.
1024 * The previous interrupt state is returned in args->spl and should
1025 * be used in a call to splx() if threads are returned to the caller.
1026 */
1027 static void
1028 do_waitq_select_n_locked_queue(waitq_t waitq, struct waitq_select_args *args)
1029 {
1030 thread_t first_thread = THREAD_NULL;
1031 struct waitq *safeq;
1032 waitq_flags_t remaining_eventmask = 0;
1033 waitq_flags_t eventmask;
1034
1035 if (waitq_irq_safe(waitq)) {
1036 eventmask = _CAST_TO_EVENT_MASK(args->event);
1037 safeq = waitq.wq_q;
1038 } else {
1039 /* JMM - add flag to waitq to avoid global lookup if no waiters */
1040 eventmask = _CAST_TO_EVENT_MASK(waitq.wq_q);
1041 safeq = waitq_get_safeq(waitq);
1042 if (safeq == NULL) {
1043 return;
1044 }
1045
1046 if (args->nthreads == 0) {
1047 args->spl = splsched();
1048 }
1049 waitq_lock(safeq);
1050 }
1051
1052 /*
1053 * If the safeq doesn't have an eventmask (not global) or the event
1054 * we're looking for IS set in its eventmask, then scan the threads
1055 * in that queue for ones that match the original <waitq,event> pair.
1056 */
1057 if (waitq_type(safeq) == WQT_TURNSTILE) {
1058 first_thread = waitq_prioq_iterate_locked(safeq, waitq.wq_q,
1059 args, &remaining_eventmask);
1060 } else if (!waitq_is_global(safeq) ||
1061 (safeq->waitq_eventmask & eventmask) == eventmask) {
1062 first_thread = waitq_queue_iterate_locked(safeq, waitq.wq_q,
1063 args, &remaining_eventmask);
1064
1065 /*
1066 * Update the eventmask of global queues we just scanned:
1067 * - If we selected all the threads in the queue,
1068 * we can clear its eventmask.
1069 *
1070 * - If we didn't find enough threads to fill our needs,
1071 * then we can assume we looked at every thread in the queue
1072 * and the mask we computed is complete - so reset it.
1073 */
1074 if (waitq_is_global(safeq)) {
1075 if (waitq_empty(safeq)) {
1076 safeq->waitq_eventmask = 0;
1077 } else if (args->nthreads < args->max_threads) {
1078 safeq->waitq_eventmask = remaining_eventmask;
1079 }
1080 }
1081 }
1082
1083 /*
1084 * Grab the first thread in the queue if no other thread was selected.
1085 * We can guarantee that no one has manipulated this thread because
1086 * it's waiting on the given waitq, and we have that waitq locked.
1087 */
1088 if (args->nthreads == 0 && first_thread != THREAD_NULL) {
1089 /* we know this is the first (and only) thread */
1090 args->nthreads += 1;
1091 if (safeq == waitq.wq_q) {
1092 args->spl = splsched();
1093 }
1094
1095 thread_lock(first_thread);
1096 waitq_thread_remove(safeq, first_thread);
1097 circle_enqueue_tail(&args->threadq, &first_thread->wait_links);
1098 }
1099
1100 /* unlock the safe queue if we locked one above */
1101 if (!waitq_same(waitq, safeq)) {
1102 waitq_unlock(safeq);
1103 if (args->nthreads == 0) {
1104 splx(args->spl);
1105 args->spl = 0;
1106 }
1107 }
1108 }
1109
1110 /**
1111  * @function do_waitq_select_n_locked_sets()
1112 *
1113 * @brief
1114 * Selects threads waiting on any set a wait queue belongs to,
1115 * or preposts the wait queue onto them.
1116 *
1117 * @discussion
1118 * @c waitq is locked.
1119 */
1120 __attribute__((noinline))
1121 static void
1122 do_waitq_select_n_locked_sets(waitq_t waitq, struct waitq_select_args *args)
1123 {
1124 waitq_type_t wq_type = waitq_type(waitq);
1125 waitq_link_t link;
1126 hw_lock_status_t st;
1127 uint32_t ticket;
1128
1129 assert(args->event == NO_EVENT64);
1130 assert(waitq_preposts(waitq));
1131
1132 waitq_link_foreach(link, waitq) {
1133 waitq_t wqset = wql_wqs(link);
1134
1135 if (wql_wqs_preposted(link)) {
1136 /*
1137  * The wql_wqs_preposted() bit is cleared
1138  * with both the wq and the wqset locks held.
1139 *
1140 * If the wqset is still preposted,
1141 * we really won't find threads there.
1142 *
1143 * Just mark the waitq as preposted and move on.
1144 */
1145 if (wq_type == WQT_PORT) {
1146 waitq.wq_q->waitq_preposted = true;
1147 }
1148 continue;
1149 }
1150
1151 if (wq_type == WQT_SELECT) {
1152 /*
1153 * If PGZ picked this select set,
1154 * translate it to the real address
1155 *
1156 * If it is still a select set
1157 * (the slot could have been reused),
1158 * then keep using it for the rest of the logic.
1159 *
1160 * Even in the extremely unlikely case where
1161 * the slot was reused for another select_set,
1162 * the `wql_sellink_valid` check below will
1163 * take care of debouncing it. But we must
1164 * forget the original pointer we read
1165 * so that we unlock the proper object.
1166 */
1167 wqset.wqs_sel = pgz_decode_allow_invalid(wqset.wqs_sel,
1168 ZONE_ID_SELECT_SET);
1169 if (!wqset.wqs_sel) {
1170 continue;
1171 }
1172 st = waitq_lock_reserve_allow_invalid(wqset, &ticket);
1173 if (st == HW_LOCK_INVALID) {
1174 continue;
1175 }
1176 } else {
1177 static_assert(HW_LOCK_CONTENDED == 0);
1178 st = waitq_lock_reserve(wqset, &ticket);
1179 }
1180 if (st == HW_LOCK_CONTENDED) {
1181 if (!circle_queue_empty(&args->threadq)) {
1182 /*
1183 * We are holding several thread locks.
1184 *
1185 * If we fail to acquire this waitq set lock,
1186 * it is possible that another core is holding
1187 * that (non IRQ-safe) waitq set lock,
1188 * while an interrupt is trying to grab the
1189 * thread lock of ones of those threads.
1190 *
1191 * In order to avoid deadlocks, flush out
1192 * the queue of threads.
1193 *
1194 * Note: this code will never run for `identify`
1195 * variants (when `max_threads` is 1).
1196 */
1197 assert(args->max_threads > 1);
1198 waitq_select_queue_flush(waitq, args);
1199 }
1200 waitq_lock_wait(wqset, ticket);
1201 }
1202
1203 if (wq_type == WQT_SELECT) {
1204 if (!wql_sellink_valid(wqset.wqs_sel, link.wqls)) {
1205 goto out_unlock;
1206 }
1207 } else if (!waitq_valid(wqset)) {
1208 goto out_unlock;
1209 }
1210
1211 /*
1212 * Find any threads waiting on this wait queue set as a queue.
1213 */
1214 do_waitq_select_n_locked_queue(wqset, args);
1215
1216 if (args->nthreads == 0) {
1217 /* No thread selected: prepost 'waitq' to 'wqset' */
1218 wql_wqs_mark_preposted(link);
1219 if (wq_type == WQT_SELECT) {
1220 wqset.wqs_sel->selset_preposted = true;
1221 } else {
1222 waitq.wq_q->waitq_preposted = true;
1223 circle_dequeue(&wqset.wqs_set->wqset_links,
1224 &link.wqll->wql_slink);
1225 circle_enqueue_tail(&wqset.wqs_set->wqset_preposts,
1226 &link.wqll->wql_slink);
1227 ipc_pset_prepost(wqset.wqs_set, waitq.wq_q);
1228 }
1229 }
1230
1231 out_unlock:
1232 waitq_unlock(wqset);
1233
1234 if (args->nthreads >= args->max_threads) {
1235 break;
1236 }
1237 }
1238 }
1239
1240 /**
1241 * @function do_waitq_select_n_locked
1242 *
1243 * @brief
1244 * Selects threads waiting on a wait queue, or preposts it.
1245 *
1246 * @discussion
1247 * @c waitq is locked.
1248 *
1249 * Recurses into all sets this wait queue belongs to.
1250 */
1251 static void
1252 do_waitq_select_n_locked(waitq_t waitq, struct waitq_select_args *args)
1253 {
1254 do_waitq_select_n_locked_queue(waitq, args);
1255
1256 if (args->nthreads >= args->max_threads) {
1257 /* already enough threads found */
1258 return;
1259 }
1260
1261 if (args->event != NO_EVENT64 || !waitq_preposts(waitq)) {
1262 /* this wakeup should not recurse into sets */
1263 return;
1264 }
1265
1266 do_waitq_select_n_locked_sets(waitq, args);
1267 }
1268
1269 static inline bool
1270 waitq_is_preposted_set(waitq_t waitq)
1271 {
1272 switch (waitq_type(waitq)) {
1273 case WQT_PORT_SET:
1274 return waitq_set_first_prepost(waitq.wqs_set, WQS_PREPOST_PEEK) != NULL;
1275
1276 case WQT_SELECT_SET:
1277 return waitq.wqs_sel->selset_preposted;
1278
1279 default:
1280 return false;
1281 }
1282 }
1283
1284 wait_result_t
1285 waitq_assert_wait64_locked(waitq_t waitq,
1286 event64_t wait_event,
1287 wait_interrupt_t interruptible,
1288 wait_timeout_urgency_t urgency,
1289 uint64_t deadline,
1290 uint64_t leeway,
1291 thread_t thread)
1292 {
1293 wait_result_t wait_result;
1294 struct waitq *safeq;
1295 uintptr_t eventmask;
1296 spl_t s;
1297
1298 switch (waitq_type(waitq)) {
1299 case WQT_PORT:
1300 case WQT_SELECT:
1301 case WQT_PORT_SET:
1302 case WQT_SELECT_SET:
1303 assert(wait_event == NO_EVENT64);
1304 break;
1305 default:
1306 assert(wait_event != NO_EVENT64);
1307 break;
1308 }
1309
1310 /*
1311 * Warning: Do _not_ place debugging print statements here.
1312 * The waitq is locked!
1313 */
1314 assert(!thread->started || thread == current_thread());
1315
1316 if (!waitq_wait_possible(thread)) {
1317 panic("thread already waiting on %p", thread->waitq.wq_q);
1318 }
1319
1320 s = splsched();
1321
1322 /*
1323 * early-out if the thread is waiting on a wait queue set
1324 * that has already been pre-posted.
1325 *
1326 * Note: waitq_is_preposted_set() may unlock the waitq-set
1327 */
1328 if (waitq_is_preposted_set(waitq)) {
1329 thread_lock(thread);
1330 thread->wait_result = THREAD_AWAKENED;
1331 thread_unlock(thread);
1332 splx(s);
1333 return THREAD_AWAKENED;
1334 }
1335
1336 /*
1337 * If already dealing with an irq safe wait queue, we are all set.
1338 * Otherwise, determine a global queue to use and lock it.
1339 */
1340 if (waitq_irq_safe(waitq)) {
1341 safeq = waitq.wq_q;
1342 eventmask = _CAST_TO_EVENT_MASK(wait_event);
1343 } else {
1344 safeq = waitq_get_safeq(waitq);
1345 if (__improbable(safeq == NULL)) {
1346 panic("Trying to assert_wait on a turnstile proxy "
1347 "that hasn't been donated one (waitq: %p)", waitq.wq_q);
1348 }
1349 eventmask = _CAST_TO_EVENT_MASK(waitq.wq_q);
1350 waitq_lock(safeq);
1351 }
1352
1353 /* lock the thread now that we have the irq-safe waitq locked */
1354 thread_lock(thread);
1355
1356 wait_result = thread_mark_wait_locked(thread, interruptible);
1357 /* thread->wait_result has been set */
1358 if (wait_result == THREAD_WAITING) {
1359 waitq_thread_insert(safeq, thread, waitq, wait_event);
1360
1361 if (deadline != 0) {
1362 boolean_t act;
1363
1364 act = timer_call_enter_with_leeway(thread->wait_timer,
1365 NULL,
1366 deadline, leeway,
1367 urgency, FALSE);
1368 if (!act) {
1369 thread->wait_timer_active++;
1370 }
1371 thread->wait_timer_is_set = TRUE;
1372 }
1373
1374 if (waitq_is_global(safeq)) {
1375 safeq->waitq_eventmask |= (waitq_flags_t)eventmask;
1376 }
1377
1378 waitq_stats_count_wait(waitq);
1379 }
1380
1381 /* unlock the thread */
1382 thread_unlock(thread);
1383
1384 /* update the inheritor's thread priority if the waitq is embedded in turnstile */
1385 if (waitq_type(safeq) == WQT_TURNSTILE && wait_result == THREAD_WAITING) {
1386 turnstile_recompute_priority_locked(waitq_to_turnstile(safeq));
1387 turnstile_update_inheritor_locked(waitq_to_turnstile(safeq));
1388 }
1389
1390 /* unlock the safeq if we locked it here */
1391 if (!waitq_same(waitq, safeq)) {
1392 waitq_unlock(safeq);
1393 }
1394
1395 splx(s);
1396
1397 return wait_result;
1398 }
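/*
 * Illustrative caller-side sketch (the usual Mach wait pattern, assumed here
 * rather than taken from this file): the waiter asserts the wait with the
 * waitq locked, unlocks, and then blocks.
 *
 *	wait_result_t wr;
 *
 *	waitq_lock(wq);
 *	wr = waitq_assert_wait64_locked(wq, event, THREAD_UNINT,
 *	    TIMEOUT_URGENCY_USER_NORMAL, deadline, 0, current_thread());
 *	waitq_unlock(wq);
 *
 *	if (wr == THREAD_WAITING) {
 *		wr = thread_block(THREAD_CONTINUE_NULL);
 *	}
 */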
1399
1400 bool
1401 waitq_pull_thread_locked(waitq_t waitq, thread_t thread)
1402 {
1403 struct waitq *safeq;
1404 uint32_t ticket;
1405
1406 assert_thread_magic(thread);
1407
1408 /* Find the interrupts disabled queue thread is waiting on */
1409 if (waitq_irq_safe(waitq)) {
1410 safeq = waitq.wq_q;
1411 } else {
1412 safeq = waitq_get_safeq(waitq);
1413 if (__improbable(safeq == NULL)) {
1414 panic("Trying to clear_wait on a turnstile proxy "
1415 "that hasn't been donated one (waitq: %p)", waitq.wq_q);
1416 }
1417 }
1418
1419 /*
1420 * thread is already locked so have to try for the waitq lock.
1421 *
1422 * We can't wait for the waitq lock under the thread lock,
1423 * however we can reserve our slot in the lock queue,
1424 * and if that reservation requires waiting, we are guaranteed
1425 * that this waitq can't die until we got our turn!
1426 */
1427 if (!waitq_lock_reserve(safeq, &ticket)) {
1428 thread_unlock(thread);
1429 waitq_lock_wait(safeq, ticket);
1430 thread_lock(thread);
1431
1432 if (!waitq_same(waitq, thread->waitq)) {
1433 /*
1434 * While we were waiting for our reservation the thread
1435 * stopped waiting on this waitq, bail out.
1436 */
1437 waitq_unlock(safeq);
1438 return false;
1439 }
1440 }
1441
1442 waitq_thread_remove(safeq, thread);
1443 waitq_stats_count_clear_wakeup(waitq);
1444 waitq_unlock(safeq);
1445 return true;
1446 }
1447
1448
1449 void
1450 waitq_clear_promotion_locked(waitq_t waitq, thread_t thread)
1451 {
1452 spl_t s = 0;
1453
1454 assert(waitq_held(waitq));
1455 assert(thread != THREAD_NULL);
1456 assert(thread == current_thread());
1457
1458 /* This flag is only cleared by the thread itself, so safe to check outside lock */
1459 if ((thread->sched_flags & TH_SFLAG_WAITQ_PROMOTED) != TH_SFLAG_WAITQ_PROMOTED) {
1460 return;
1461 }
1462
1463 if (!waitq_irq_safe(waitq)) {
1464 s = splsched();
1465 }
1466 thread_lock(thread);
1467
1468 sched_thread_unpromote_reason(thread, TH_SFLAG_WAITQ_PROMOTED, 0);
1469
1470 thread_unlock(thread);
1471 if (!waitq_irq_safe(waitq)) {
1472 splx(s);
1473 }
1474 }
1475
1476 kern_return_t
1477 waitq_wakeup64_all_locked(waitq_t waitq,
1478 event64_t wake_event,
1479 wait_result_t result,
1480 int priority,
1481 waitq_lock_state_t lock_state)
1482 {
1483 struct waitq_select_args args = {
1484 .event = wake_event,
1485 .priority = priority,
1486 .max_threads = UINT32_MAX,
1487 .result = result,
1488 .options = WQ_OPTION_NONE,
1489 };
1490
1491 assert(waitq_held(waitq));
1492
1493 do_waitq_select_n_locked(waitq, &args);
1494 waitq_stats_count_wakeup(waitq, args.nthreads);
1495
1496 if (lock_state == WAITQ_UNLOCK) {
1497 waitq_unlock(waitq);
1498 }
1499
1500 waitq_select_queue_flush(waitq, &args);
1501
1502 if (args.nthreads > 0) {
1503 splx(args.spl);
1504 return KERN_SUCCESS;
1505 }
1506
1507 return KERN_NOT_WAITING;
1508 }
1509
1510 kern_return_t
1511 waitq_wakeup64_one_locked(waitq_t waitq,
1512 event64_t wake_event,
1513 wait_result_t result,
1514 int priority,
1515 waitq_lock_state_t lock_state,
1516 waitq_options_t option)
1517 {
1518 struct waitq_select_args args = {
1519 .event = wake_event,
1520 .priority = priority,
1521 .max_threads = 1,
1522 .result = result,
1523 .options = option,
1524 };
1525
1526 assert(waitq_held(waitq));
1527
1528 do_waitq_select_n_locked(waitq, &args);
1529 waitq_stats_count_wakeup(waitq, args.nthreads);
1530
1531 if (lock_state == WAITQ_UNLOCK) {
1532 waitq_unlock(waitq);
1533 }
1534
1535 waitq_select_queue_flush(waitq, &args);
1536
1537 if (args.nthreads > 0) {
1538 splx(args.spl);
1539 return KERN_SUCCESS;
1540 }
1541
1542 return KERN_NOT_WAITING;
1543 }
1544
1545 thread_t
1546 waitq_wakeup64_identify_locked(waitq_t waitq,
1547 event64_t wake_event,
1548 wait_result_t result,
1549 spl_t *spl,
1550 int priority,
1551 waitq_lock_state_t lock_state)
1552 {
1553 struct waitq_select_args args = {
1554 .event = wake_event,
1555 .priority = priority,
1556 .max_threads = 1,
1557 };
1558 thread_t thread = THREAD_NULL;
1559
1560 assert(waitq_held(waitq));
1561
1562 do_waitq_select_n_locked(waitq, &args);
1563 waitq_stats_count_wakeup(waitq, args.nthreads);
1564
1565 if (lock_state == WAITQ_UNLOCK) {
1566 waitq_unlock(waitq);
1567 }
1568
1569 if (args.nthreads > 0) {
1570 kern_return_t __assert_only ret;
1571
1572 thread = cqe_dequeue_head(&args.threadq, struct thread, wait_links);
1573 assert(args.nthreads == 1 && circle_queue_empty(&args.threadq));
1574
1575 maybe_adjust_thread_pri(thread, priority, waitq);
1576 ret = thread_go(thread, result, WQ_OPTION_NONE);
1577 assert(ret == KERN_SUCCESS);
1578 *spl = args.spl;
1579 }
1580
1581 return thread; /* locked if not NULL (caller responsible for spl) */
1582 }
1583
1584 kern_return_t
1585 waitq_wakeup64_thread_and_unlock(struct waitq *waitq, event64_t event,
1586 thread_t thread, wait_result_t result)
1587 {
1588 kern_return_t ret = KERN_NOT_WAITING;
1589
1590 assert(waitq_irq_safe(waitq));
1591 assert(waitq_held(waitq));
1592 assert_thread_magic(thread);
1593
1594 /*
1595 * See if the thread was still waiting there. If so, it got
1596 * dequeued and returned locked.
1597 */
1598 thread_lock(thread);
1599
1600 if (waitq_same(thread->waitq, waitq) && thread->wait_event == event) {
1601 waitq_thread_remove(waitq, thread);
1602 ret = KERN_SUCCESS;
1603 }
1604 waitq_stats_count_wakeup(waitq, ret == KERN_SUCCESS ? 1 : 0);
1605
1606 waitq_unlock(waitq);
1607
1608 if (ret == KERN_SUCCESS) {
1609 ret = thread_go(thread, result, WQ_OPTION_NONE);
1610 assert(ret == KERN_SUCCESS);
1611 }
1612
1613 thread_unlock(thread);
1614
1615 return ret;
1616 }
1617
1618
1619 #pragma mark waitq
1620
1621 __attribute__((always_inline))
1622 void
1623 waitq_init(waitq_t waitq, waitq_type_t type, int policy)
1624 {
1625 assert((policy & SYNC_POLICY_FIXED_PRIORITY) == 0);
1626
1627 *waitq.wq_q = (struct waitq){
1628 .waitq_type = type,
1629 .waitq_fifo = ((policy & SYNC_POLICY_REVERSED) == 0),
1630 };
1631
1632 switch (type) {
1633 case WQT_INVALID:
1634 __builtin_trap();
1635
1636 case WQT_TURNSTILE:
1637 /* For turnstile, initialize it as a priority queue */
1638 priority_queue_init(&waitq.wq_q->waitq_prio_queue);
1639 assert(waitq.wq_q->waitq_fifo == 0);
1640 break;
1641
1642 case WQT_PORT:
1643 waitq.wq_q->waitq_ts = TURNSTILE_NULL;
1644 break;
1645
1646 case WQT_PORT_SET:
1647 circle_queue_init(&waitq.wqs_set->wqset_preposts);
1648 OS_FALLTHROUGH;
1649 case WQT_SELECT_SET:
1650 case WQT_QUEUE:
1651 case WQT_SELECT:
1652 circle_queue_init(&waitq.wq_q->waitq_queue);
1653 break;
1654 }
1655
1656 if (policy & SYNC_POLICY_INIT_LOCKED) {
1657 hw_lck_ticket_init_locked(&waitq.wq_q->waitq_interlock, &waitq_lck_grp);
1658 } else {
1659 hw_lck_ticket_init(&waitq.wq_q->waitq_interlock, &waitq_lck_grp);
1660 }
1661 }
1662
1663 void
1664 waitq_deinit(waitq_t waitq)
1665 {
1666 waitq_type_t type = waitq_type(waitq);
1667
1668 switch (type) {
1669 case WQT_QUEUE:
1670 assert(circle_queue_empty(&waitq.wq_q->waitq_queue));
1671 waitq_invalidate(waitq);
1672 break;
1673
1674 case WQT_TURNSTILE:
1675 assert(priority_queue_empty(&waitq.wq_q->waitq_prio_queue));
1676 assert(waitq.wq_q->waitq_inheritor == TURNSTILE_INHERITOR_NULL);
1677 waitq_invalidate(waitq);
1678 break;
1679
1680 case WQT_PORT:
1681 assert(waitq.wq_q->waitq_ts == TURNSTILE_NULL);
1682 assert(circle_queue_empty(&waitq.wq_q->waitq_links));
1683 break;
1684
1685 case WQT_SELECT:
1686 assert(waitq.wq_q->waitq_sellinks.next == NULL);
1687 assert(circle_queue_empty(&waitq.wqs_set->wqset_queue));
1688 break;
1689
1690 case WQT_PORT_SET:
1691 assert(circle_queue_empty(&waitq.wqs_set->wqset_queue));
1692 assert(circle_queue_empty(&waitq.wqs_set->wqset_links));
1693 assert(circle_queue_empty(&waitq.wqs_set->wqset_preposts));
1694 break;
1695
1696 default:
1697 panic("invalid wait type: %p/%d", waitq.wq_q, type);
1698 }
1699
1700 /*
1701 * The waitq must have been invalidated, or hw_lck_ticket_destroy()
1702 * below won't wait for reservations from waitq_lock_reserve(),
1703 * waitq_lock_reserve_allow_invalid() or waitq_lock_allow_invalid().
1704 */
1705 assert(!waitq_valid(waitq.wqs_set));
1706 hw_lck_ticket_destroy(&waitq.wq_q->waitq_interlock, &waitq_lck_grp);
1707 }
1708
1709
1710 #pragma mark port-set sets
1711
1712 void
1713 waitq_set_unlink_all_locked(struct waitq_set *wqset, waitq_link_list_t *free_l)
1714 {
1715 uint32_t batch = waitq_set_unlink_batch;
1716
1717 waitq_invalidate(wqset);
1718
1719 for (;;) {
1720 struct waitq_link *link;
1721 queue_entry_t elt;
1722 circle_queue_t q;
1723 struct waitq *wq;
1724 uint32_t ticket;
1725 bool stable = true;
1726
1727 if (!circle_queue_empty(&wqset->wqset_links)) {
1728 q = &wqset->wqset_links;
1729 } else if (!circle_queue_empty(&wqset->wqset_preposts)) {
1730 q = &wqset->wqset_preposts;
1731 } else {
1732 break;
1733 }
1734
1735 if (batch-- == 0) {
1736 waitq_unlock(wqset);
1737 waitq_lock(wqset);
1738 batch = waitq_set_unlink_batch;
1739 continue;
1740 }
1741
1742 elt = circle_queue_first(q);
1743 link = cqe_element(elt, struct waitq_link, wql_slink);
1744 wq = link->wql_wq;
1745
1746 if (__improbable(!waitq_lock_reserve(wq, &ticket))) {
1747 waitq_unlock(wqset);
1748 waitq_lock_wait(wq, ticket);
1749 waitq_lock(wqset);
1750 stable = (elt == circle_queue_first(q) && link->wql_wq == wq);
1751 }
1752
1753 if (stable) {
1754 circle_dequeue(q, &link->wql_slink);
1755 circle_dequeue(&wq->waitq_links, &link->wql_qlink);
1756 wql_list_push(free_l, link);
1757 }
1758
1759 waitq_unlock(wq);
1760 }
1761 }
1762
1763 void
1764 waitq_clear_prepost_locked(struct waitq *waitq)
1765 {
1766 assert(waitq_type(waitq) == WQT_PORT);
1767 waitq->waitq_preposted = false;
1768 }
1769
1770 void
1771 waitq_set_foreach_member_locked(struct waitq_set *wqs, void (^cb)(struct waitq *))
1772 {
1773 struct waitq_link *link;
1774
1775 cqe_foreach_element(link, &wqs->wqset_links, wql_slink) {
1776 cb(link->wql_wq);
1777 }
1778
1779 cqe_foreach_element(link, &wqs->wqset_preposts, wql_slink) {
1780 cb(link->wql_wq);
1781 }
1782 }
1783
1784 __abortlike
1785 static void
1786 __waitq_link_arguments_panic(struct waitq *waitq, struct waitq_set *wqset)
1787 {
1788 if (!waitq_valid(waitq)) {
1789 panic("Invalid waitq: %p", waitq);
1790 }
1791 if (waitq_type(waitq) != WQT_PORT) {
1792 panic("Invalid waitq type: %p:%d", waitq, waitq->waitq_type);
1793 }
1794 panic("Invalid waitq-set: %p", wqset);
1795 }
1796
1797 static inline void
1798 __waitq_link_arguments_validate(struct waitq *waitq, struct waitq_set *wqset)
1799 {
1800 if (!waitq_valid(waitq) ||
1801 waitq_type(waitq) != WQT_PORT ||
1802 waitq_type(wqset) != WQT_PORT_SET) {
1803 __waitq_link_arguments_panic(waitq, wqset);
1804 }
1805 }
1806
1807 __abortlike
1808 static void
1809 __waitq_invalid_panic(waitq_t waitq)
1810 {
1811 panic("Invalid waitq: %p", waitq.wq_q);
1812 }
1813
1814 static void
1815 __waitq_validate(waitq_t waitq)
1816 {
1817 if (!waitq_valid(waitq)) {
1818 __waitq_invalid_panic(waitq);
1819 }
1820 }
1821
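/*
 * Links a port wait queue to a port-set, with both locks held by the caller.
 * Returns KERN_ALREADY_IN_SET if a link already exists.  Otherwise the
 * pre-allocated link in @c *linkp is initialized and, when it is actually
 * enqueued, consumed (replaced with WQL_NULL); any link left behind in
 * @c *linkp remains the caller's to free.
 */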
1822 kern_return_t
1823 waitq_link_locked(struct waitq *waitq, struct waitq_set *wqset,
1824 waitq_link_t *linkp)
1825 {
1826 assert(linkp->wqlh);
1827
1828 __waitq_link_arguments_validate(waitq, wqset);
1829
1830 if (wql_find(waitq, wqset)) {
1831 return KERN_ALREADY_IN_SET;
1832 }
1833
1834 linkp->wqll->wql_wq = waitq;
1835 linkp->wqll->wql_wqs = (uintptr_t)wqset;
1836
1837 if (waitq_valid(wqset)) {
1838 circle_enqueue_tail(&wqset->wqset_links, &linkp->wqll->wql_slink);
1839 circle_enqueue_tail(&waitq->waitq_links, &linkp->wqll->wql_qlink);
1840 *linkp = WQL_NULL;
1841 }
1842
1843 return KERN_SUCCESS;
1844 }
1845
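/*
 * Records that @c waitq (a port) preposted to @c wqset: the first time this
 * happens, the link is marked preposted, moved from the set's links queue to
 * its preposts queue, and ipc_pset_prepost() is invoked.  Returns
 * KERN_NOT_IN_SET if the port is not a member of the set.
 */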
1846 kern_return_t
1847 waitq_link_prepost_locked(struct waitq *waitq, struct waitq_set *wqset)
1848 {
1849 struct waitq_link *link;
1850
1851 __waitq_link_arguments_validate(waitq, wqset);
1852
1853 link = wql_find(waitq, wqset);
1854 if (link == NULL) {
1855 return KERN_NOT_IN_SET;
1856 }
1857
1858 if (!wql_wqs_preposted(link)) {
1859 wql_wqs_mark_preposted(link);
1860 waitq->waitq_preposted = true;
1861 circle_dequeue(&wqset->wqset_links, &link->wql_slink);
1862 circle_enqueue_tail(&wqset->wqset_preposts, &link->wql_slink);
1863 ipc_pset_prepost(wqset, waitq);
1864 }
1865
1866 return KERN_SUCCESS;
1867 }
1868
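/*
 * Detaches @c waitq from @c wqset if it is a member, and hands the removed
 * link (or a NULL link) back to the caller to be freed once the locks are
 * dropped.
 */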
1869 waitq_link_t
1870 waitq_unlink_locked(struct waitq *waitq, struct waitq_set *wqset)
1871 {
1872 struct waitq_link *link;
1873
1874 __waitq_link_arguments_validate(waitq, wqset);
1875
1876 link = wql_find(waitq, wqset);
1877 if (link) {
1878 circle_dequeue(wql_wqs_queue(wqset, link), &link->wql_slink);
1879 circle_dequeue(&waitq->waitq_links, &link->wql_qlink);
1880 }
1881
1882 return (waitq_link_t){ .wqll = link };
1883 }
1884
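/*
 * Unlinks a port wait queue from every set it belongs to except
 * @c except_wqset, pushing the detached links onto @c free_l.  Each set is
 * locked only long enough to dequeue its link, and the surviving link (if
 * any) is re-queued after the member list is reinitialized.
 */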
1885 void
1886 waitq_unlink_all_locked(struct waitq *waitq, struct waitq_set *except_wqset,
1887 waitq_link_list_t *free_l)
1888 {
1889 struct waitq_link *kept_link = NULL;
1890 struct waitq_link *link;
1891
1892 assert(waitq_type(waitq) == WQT_PORT);
1893
1894 cqe_foreach_element_safe(link, &waitq->waitq_links, wql_qlink) {
1895 waitq_t wqs = wql_wqs(link);
1896
1897 if (wqs.wqs_set == except_wqset) {
1898 kept_link = link;
1899 continue;
1900 }
1901
1902 waitq_lock(wqs);
1903 circle_dequeue(wql_wqs_queue(wqs.wqs_set, link),
1904 &link->wql_slink);
1905 wql_list_push(free_l, link);
1906 waitq_unlock(wqs);
1907 }
1908
1909 circle_queue_init(&waitq->waitq_links);
1910 if (kept_link) {
1911 circle_enqueue_tail(&waitq->waitq_links, &kept_link->wql_qlink);
1912 }
1913 }
1914
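/*
 * Returns the first member of the set's prepost queue that is still
 * preposted, or NULL.  Links whose queue has since cleared its prepost are
 * lazily moved back to the regular links queue.  Unless WQS_PREPOST_PEEK is
 * passed, the prepost queue is rotated so preposting queues are serviced
 * round-robin; with WQS_PREPOST_LOCK the returned queue is left locked.
 */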
1915 struct waitq *
1916 waitq_set_first_prepost(struct waitq_set *wqset, wqs_prepost_flags_t flags)
1917 {
1918 circle_queue_t q = &wqset->wqset_preposts;
1919 queue_entry_t elt;
1920 struct waitq_link *link;
1921 struct waitq *wq;
1922 uint32_t ticket;
1923
1924 if (__improbable(!waitq_valid(wqset))) {
1925 return NULL;
1926 }
1927
1928 while (!circle_queue_empty(q)) {
1929 elt = circle_queue_first(q);
1930 link = cqe_element(elt, struct waitq_link, wql_slink);
1931 wq = link->wql_wq;
1932
1933 if (__improbable(!waitq_lock_reserve(wq, &ticket))) {
1934 waitq_unlock(wqset);
1935 waitq_lock_wait(wq, ticket);
1936 waitq_lock(wqset);
1937 if (!waitq_valid(wqset)) {
1938 waitq_unlock(wq);
1939 return NULL;
1940 }
1941
1942 if (elt != circle_queue_first(q) || link->wql_wq != wq) {
1943 waitq_unlock(wq);
1944 continue;
1945 }
1946 }
1947
1948 if (wq->waitq_preposted) {
1949 if ((flags & WQS_PREPOST_PEEK) == 0) {
1950 circle_queue_rotate_head_forward(q);
1951 }
1952 if ((flags & WQS_PREPOST_LOCK) == 0) {
1953 waitq_unlock(wq);
1954 }
1955 return wq;
1956 }
1957
1958 /*
1959 * We found a link that is no longer preposted:
1960 * someone must have called waitq_clear_prepost_locked()
1961 * and this set has only just noticed.
1962 */
1963 wql_wqs_clear_preposted(link);
1964 waitq_unlock(wq);
1965
1966 circle_dequeue(q, &link->wql_slink);
1967 circle_enqueue_tail(&wqset->wqset_links, &link->wql_slink);
1968 }
1969
1970 return NULL;
1971 }
1972
1973
1974 #pragma mark select sets
1975
1976 /**
1977 * @function select_set_nextid()
1978 *
1979 * @brief
1980 * Generate a unique ID for a select set "generation"
1981 *
1982 * @discussion
1983 * This mixes the CPU number with a per-CPU monotonic counter
1984 * (in order to avoid contention on a global atomic).
1985 *
1986 * In order for select sets to be invalidated very quickly,
1987 * they do not have backward linkages to their member queues.
1988 *
1989 * Instead, each time a new @c select() "pass" is initiated,
1990 * a new ID is generated, which is copied onto the @c waitq_sellink
1991 * links at the time of link.
1992 *
1993 * The zone for select sets is sequestered, which allows for select
1994 * wait queues to speculatively lock their set during prepost
1995 * and use this ID to debounce wakeups and avoid spurious wakeups
1996 * (this is only an "optimization": select recovers from spurious wakeups,
1997 * we just want those to be very rare).
1998 */
1999 __attribute__((always_inline))
2000 static inline uint64_t
2001 select_set_nextid(bool preemption_enabled)
2002 {
2003 /* waitq_bootstrap() set the low byte to a unique value per CPU */
2004 static_assert(MAX_CPUS <= 256);
2005 const uint64_t inc = 256;
2006 uint64_t id;
2007
2008 #ifdef __x86_64__
2009 /* uncontended atomics are slower than disabling preemption on Intel */
2010 if (preemption_enabled) {
2011 disable_preemption();
2012 }
2013 id = (*PERCPU_GET(select_setid) += inc);
2014 if (preemption_enabled) {
2015 enable_preemption();
2016 }
2017 #else
2018 /*
2019 * If preemption is enabled, this might update another CPU's
2020 * setid; that will be rare but is acceptable, as it still
2021 * produces a unique select ID.
2022 *
2023 * We chose this because the uncontended atomics on !intel
2024 * are faster than disabling/reenabling preemption.
2025 */
2026 (void)preemption_enabled;
2027 id = os_atomic_add(PERCPU_GET(select_setid), inc, relaxed);
2028 #endif
2029
2030 return id;
2031 }
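
/*
 * Worked example (illustrative numbers, assuming waitq_bootstrap() seeded
 * CPU 3's counter with the low byte 3): successive IDs generated on that
 * CPU are 3 + 256 = 0x103, then 0x203, 0x303, ...  Increments of 256 never
 * change the low byte, so IDs minted by different CPUs cannot collide, and
 * each new select() pass observes a strictly larger ID than the last one
 * generated on that CPU.
 */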
2032
2033 struct select_set *
2034 select_set_alloc(void)
2035 {
2036 struct select_set *selset;
2037 selset = zalloc_id(ZONE_ID_SELECT_SET, Z_ZERO | Z_WAITOK | Z_NOFAIL);
2038
2039 waitq_init(selset, WQT_SELECT_SET, SYNC_POLICY_FIFO);
2040 selset->selset_id = select_set_nextid(true);
2041
2042 return selset;
2043 }
2044
2045 __abortlike
2046 static void
2047 __select_set_link_arguments_panic(struct waitq *waitq, struct select_set *set)
2048 {
2049 if (!waitq_valid(waitq)) {
2050 panic("Invalid waitq: %p", waitq);
2051 }
2052 if (waitq_type(waitq) != WQT_SELECT) {
2053 panic("Invalid waitq type: %p:%d", waitq, waitq->waitq_type);
2054 }
2055 panic("Invalid waitq-set: %p", set);
2056 }
2057
2058 static inline void
2059 __select_set_link_arguments_validate(struct waitq *waitq, struct select_set *set)
2060 {
2061 if (!waitq_valid(waitq) ||
2062 waitq_type(waitq) != WQT_SELECT ||
2063 waitq_type(set) != WQT_SELECT_SET) {
2064 __select_set_link_arguments_panic(waitq, set);
2065 }
2066 }
2067
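/*
 * Registers @c waitq with @c set for the current select() pass; the waitq is
 * unlocked on entry.  If a link to this set already exists it is simply
 * restamped with the set's current generation ID; otherwise the caller's
 * pre-allocated link is consumed.  Linking the select_conflict_queue also
 * flags the set so the conflict queue can be unhooked on reset/free.
 */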
2068 void
2069 select_set_link(struct waitq *waitq, struct select_set *set,
2070 waitq_link_t *linkp)
2071 {
2072 struct waitq_sellink *link;
2073
2074 __select_set_link_arguments_validate(waitq, set);
2075
2076 waitq_lock(waitq);
2077
2078 if (waitq == &select_conflict_queue) {
2079 waitq_lock(set);
2080 set->selset_conflict = true;
2081 waitq_unlock(set);
2082 }
2083
2084 wql_list_foreach(link, &waitq->waitq_sellinks) {
2085 if (waitq_same(wql_wqs(link), set)) {
2086 goto found;
2087 }
2088 }
2089
2090 link = linkp->wqls;
2091 *linkp = WQL_NULL;
2092 wql_list_push(&waitq->waitq_sellinks, link);
2093
2094 found:
2095 link->wql_wqs = (uintptr_t)set;
2096 link->wql_setid = set->selset_id;
2097 waitq_unlock(waitq);
2098 }
2099
2100 static void
2101 select_set_unlink_conflict_queue(struct select_set *set)
2102 {
2103 struct waitq_link_list_entry **prev;
2104 struct waitq_sellink *link;
2105
2106 waitq_lock(&select_conflict_queue);
2107
2108 /*
2109 * We know the conflict queue is hooked,
2110 * so find the linkage and free it.
2111 */
2112 prev = &select_conflict_queue.waitq_sellinks.next;
2113 for (;;) {
2114 assert(*prev);
2115 link = wql_list_elem(*prev);
2116 if (waitq_same(wql_wqs(link), set)) {
2117 *prev = link->wql_next.next;
2118 break;
2119 }
2120 prev = &link->wql_next.next;
2121 }
2122
2123 waitq_unlock(&select_conflict_queue);
2124
2125 waitq_link_free(WQT_SELECT_SET, link);
2126 }
2127
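/*
 * Common helper for select_set_reset() and select_set_free(): unhooks the
 * conflict-queue linkage if one was established, generates a fresh set ID so
 * that stale member links no longer match, and clears the prepost/conflict
 * state.  When @c invalidate is set, the set is also invalidated so that it
 * can be destroyed.
 */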
2128 static void
2129 __select_set_reset(struct select_set *set, bool invalidate)
2130 {
2131 if (set->selset_conflict) {
2132 select_set_unlink_conflict_queue(set);
2133 }
2134
2135 waitq_lock(set);
2136 if (invalidate) {
2137 waitq_invalidate(set);
2138 }
2139 set->selset_id = select_set_nextid(false);
2140 set->selset_preposted = 0;
2141 set->selset_conflict = 0;
2142 waitq_unlock(set);
2143 }
2144
2145 void
2146 select_set_reset(struct select_set *set)
2147 {
2148 __select_set_reset(set, false);
2149 }
2150
2151 void
2152 select_set_free(struct select_set *set)
2153 {
2154 __select_set_reset(set, true);
2155 hw_lck_ticket_destroy(&set->selset_interlock, &waitq_lck_grp);
2156 zfree_id(ZONE_ID_SELECT_SET, set);
2157 }
2158
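/*
 * Tears down a select wait queue: wakes every waiter with @c result,
 * invalidates the queue, detaches its select links so they can be freed
 * outside the lock, and finally deinits the queue.
 */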
2159 void
2160 select_waitq_wakeup_and_deinit(
2161 struct waitq *waitq,
2162 event64_t wake_event,
2163 wait_result_t result,
2164 int priority)
2165 {
2166 waitq_link_list_t free_l = { };
2167
2168 if (waitq_is_valid(waitq)) {
2169 assert(waitq_type(waitq) == WQT_SELECT);
2170
2171 waitq_lock(waitq);
2172
2173 waitq_wakeup64_all_locked(waitq, wake_event, result,
2174 priority, WAITQ_KEEP_LOCKED);
2175
2176 waitq_invalidate(waitq);
2177 free_l = waitq->waitq_sellinks;
2178 waitq->waitq_sellinks.next = NULL;
2179
2180 waitq_unlock(waitq);
2181
2182 waitq_link_free_list(WQT_SELECT, &free_l);
2183
2184 waitq_deinit(waitq);
2185 }
2186 }
2187
2188 #pragma mark assert_wait / wakeup (high level)
2189
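/*
 * The wrappers below share one shape: validate the queue, raise spl when the
 * queue is IRQ-safe, take the queue lock, call the _locked primitive, then
 * undo everything in reverse order.
 *
 * A minimal sketch of the usual wait/wakeup pairing from a client's point of
 * view follows; it is illustrative only (example_wq, example_wait and
 * example_wake are hypothetical, the queue is assumed to have been set up
 * with waitq_init() and SYNC_POLICY_FIFO, and a deadline of 0 is assumed to
 * mean "no timeout").
 */
#if 0
static struct waitq example_wq;	/* assumed: initialized elsewhere with waitq_init() */

static void
example_wait(void *object)
{
	wait_result_t wr;

	/* declare intent to wait on <example_wq, object>, then block */
	wr = waitq_assert_wait64(&example_wq, CAST_EVENT64_T(object),
	    THREAD_UNINT, 0);
	if (wr == THREAD_WAITING) {
		wr = thread_block(THREAD_CONTINUE_NULL);
	}
}

static void
example_wake(void *object)
{
	/* returns KERN_NOT_WAITING when nobody is blocked on the event */
	(void)waitq_wakeup64_one(&example_wq, CAST_EVENT64_T(object),
	    THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
}
#endif
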
2190 wait_result_t
2191 waitq_assert_wait64(struct waitq *waitq,
2192 event64_t wait_event,
2193 wait_interrupt_t interruptible,
2194 uint64_t deadline)
2195 {
2196 thread_t thread = current_thread();
2197 wait_result_t ret;
2198 spl_t s = 0;
2199
2200 __waitq_validate(waitq);
2201
2202 if (waitq_irq_safe(waitq)) {
2203 s = splsched();
2204 }
2205 waitq_lock(waitq);
2206
2207 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
2208 TIMEOUT_URGENCY_SYS_NORMAL, deadline, TIMEOUT_NO_LEEWAY, thread);
2209
2210 waitq_unlock(waitq);
2211 if (waitq_irq_safe(waitq)) {
2212 splx(s);
2213 }
2214
2215 return ret;
2216 }
2217
2218 wait_result_t
2219 waitq_assert_wait64_leeway(struct waitq *waitq,
2220 event64_t wait_event,
2221 wait_interrupt_t interruptible,
2222 wait_timeout_urgency_t urgency,
2223 uint64_t deadline,
2224 uint64_t leeway)
2225 {
2226 wait_result_t ret;
2227 thread_t thread = current_thread();
2228 spl_t s = 0;
2229
2230 __waitq_validate(waitq);
2231
2232 if (waitq_irq_safe(waitq)) {
2233 s = splsched();
2234 }
2235 waitq_lock(waitq);
2236
2237 ret = waitq_assert_wait64_locked(waitq, wait_event, interruptible,
2238 urgency, deadline, leeway, thread);
2239
2240 waitq_unlock(waitq);
2241 if (waitq_irq_safe(waitq)) {
2242 splx(s);
2243 }
2244
2245 return ret;
2246 }
2247
2248 kern_return_t
2249 waitq_wakeup64_one(struct waitq *waitq, event64_t wake_event,
2250 wait_result_t result, int priority)
2251 {
2252 kern_return_t kr;
2253 spl_t spl = 0;
2254
2255 __waitq_validate(waitq);
2256
2257 if (waitq_irq_safe(waitq)) {
2258 spl = splsched();
2259 }
2260 waitq_lock(waitq);
2261
2262 /* waitq is unlocked upon return */
2263 kr = waitq_wakeup64_one_locked(waitq, wake_event, result,
2264 priority, WAITQ_UNLOCK, WQ_OPTION_NONE);
2265
2266 if (waitq_irq_safe(waitq)) {
2267 splx(spl);
2268 }
2269
2270 return kr;
2271 }
2272
2273 kern_return_t
2274 waitq_wakeup64_all(waitq_t waitq, event64_t wake_event,
2275 wait_result_t result, int priority)
2276 {
2277 kern_return_t ret;
2278 spl_t spl = 0;
2279
2280 __waitq_validate(waitq);
2281
2282 if (waitq_irq_safe(waitq)) {
2283 spl = splsched();
2284 }
2285 waitq_lock(waitq);
2286
2287 ret = waitq_wakeup64_all_locked(waitq, wake_event, result,
2288 priority, WAITQ_UNLOCK);
2289
2290 if (waitq_irq_safe(waitq)) {
2291 splx(spl);
2292 }
2293
2294 return ret;
2295 }
2296
2297 kern_return_t
2298 waitq_wakeup64_thread(struct waitq *waitq, event64_t event,
2299 thread_t thread, wait_result_t result)
2300 {
2301 spl_t s = splsched();
2302 kern_return_t ret;
2303
2304 __waitq_validate(waitq);
2305 assert(waitq_irq_safe(waitq));
2306 waitq_lock(waitq);
2307
2308 ret = waitq_wakeup64_thread_and_unlock(waitq, event, thread, result);
2309
2310 splx(s);
2311
2312 return ret;
2313 }
2314
2315 thread_t
2316 waitq_wakeup64_identify(waitq_t waitq, event64_t wake_event,
2317 wait_result_t result, int priority)
2318 {
2319 spl_t thread_spl = 0;
2320 thread_t thread;
2321 spl_t spl = 0;
2322
2323 __waitq_validate(waitq);
2324
2325 if (waitq_irq_safe(waitq)) {
2326 spl = splsched();
2327 }
2328 waitq_lock(waitq);
2329
2330 thread = waitq_wakeup64_identify_locked(waitq, wake_event, result,
2331 &thread_spl, priority, WAITQ_UNLOCK);
2332 /* waitq is unlocked, thread is locked */
2333
2334 if (thread != THREAD_NULL) {
2335 thread_reference(thread);
2336 thread_unlock(thread);
2337 splx(thread_spl);
2338 }
2339
2340 if (waitq_irq_safe(waitq)) {
2341 splx(spl);
2342 }
2343
2344 /* returns +1 ref to running thread or THREAD_NULL */
2345 return thread;
2346 }
2347
2348
2349 #pragma mark tests
2350 #if DEBUG || DEVELOPMENT
2351
2352 #include <ipc/ipc_pset.h>
2353 #include <sys/errno.h>
2354
2355 #define MAX_GLOBAL_TEST_QUEUES 64
2356 static struct waitq wqt_waitq_array[MAX_GLOBAL_TEST_QUEUES];
2357 static bool wqt_running;
2358 static bool wqt_init;
2359
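/*
 * Minimal harness for the tests below: wqt_start()/wqt_end() serialize runs
 * through the wqt_running flag and lazily initialize a small array of global
 * port wait queues.  The tests are reached through the sysctl registered at
 * the bottom of this file (presumably debug.test.waitq_basic on DEBUG or
 * DEVELOPMENT kernels).
 */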
2360 static bool
2361 wqt_start(const char *test, int64_t *out)
2362 {
2363 if (os_atomic_xchg(&wqt_running, true, acquire)) {
2364 *out = 0;
2365 return false;
2366 }
2367
2368 if (!wqt_init) {
2369 wqt_init = true;
2370 for (int i = 0; i < MAX_GLOBAL_TEST_QUEUES; i++) {
2371 waitq_init(&wqt_waitq_array[i], WQT_PORT, SYNC_POLICY_FIFO);
2372 }
2373 }
2374
2375 printf("[WQ] starting %s\n", test);
2376 return true;
2377 }
2378
2379 static int
2380 wqt_end(const char *test, int64_t *out)
2381 {
2382 os_atomic_store(&wqt_running, false, release);
2383 printf("[WQ] done %s\n", test);
2384 *out = 1;
2385 return 0;
2386 }
2387
2388 static struct waitq *
2389 wqt_wq(uint32_t index)
2390 {
2391 return &wqt_waitq_array[index];
2392 }
2393
2394 static uint32_t
2395 wqt_idx(struct waitq *waitq)
2396 {
2397 assert(waitq >= wqt_waitq_array &&
2398 waitq < wqt_waitq_array + MAX_GLOBAL_TEST_QUEUES);
2399 return (uint32_t)(waitq - wqt_waitq_array);
2400 }
2401
2402 __attribute__((overloadable))
2403 static uint64_t
2404 wqt_bit(uint32_t index)
2405 {
2406 return 1ull << index;
2407 }
2408
2409 __attribute__((overloadable))
2410 static uint64_t
2411 wqt_bit(struct waitq *waitq)
2412 {
2413 return wqt_bit(wqt_idx(waitq));
2414 }
2415
2416 static struct waitq_set *
2417 wqt_wqset_create(void)
2418 {
2419 struct waitq_set *wqset;
2420
2421 wqset = &ipc_pset_alloc_special(ipc_space_kernel)->ips_wqset;
2422 printf("[WQ]: created waitq set %p\n", wqset);
2423 return wqset;
2424 }
2425
2426 static void
2427 wqt_wqset_free(struct waitq_set *wqset)
2428 {
2429 printf("[WQ]: destroying waitq set %p\n", wqset);
2430 waitq_lock(wqset);
2431 ipc_pset_destroy(ipc_space_kernel,
2432 __container_of(wqset, struct ipc_pset, ips_wqset));
2433 }
2434
2435 static void
2436 wqt_link(uint32_t index, struct waitq_set *wqset, kern_return_t want)
2437 {
2438 struct waitq *waitq = wqt_wq(index);
2439 waitq_link_t link = waitq_link_alloc(WQT_PORT_SET);
2440 kern_return_t kr;
2441
2442 printf("[WQ]: linking waitq [%d] to global wqset (%p)\n", index, wqset);
2443
2444 waitq_lock(waitq);
2445 waitq_lock(wqset);
2446 kr = waitq_link_locked(waitq, wqset, &link);
2447 waitq_unlock(wqset);
2448 waitq_unlock(waitq);
2449
2450 if (link.wqlh) {
2451 waitq_link_free(WQT_PORT_SET, link);
2452 }
2453
2454 printf("[WQ]:\tkr=%d\texpected=%d\n", kr, want);
2455 assert(kr == want);
2456 }
2457
2458 static void
2459 wqt_unlink(uint32_t index, struct waitq_set *wqset, kern_return_t want)
2460 {
2461 struct waitq *waitq = wqt_wq(index);
2462 waitq_link_t link;
2463 kern_return_t kr;
2464
2465 printf("[WQ]: unlinking waitq [%d] from global wqset (%p)\n",
2466 index, wqset);
2467
2468 waitq_lock(waitq);
2469 waitq_lock(wqset);
2470 link = waitq_unlink_locked(waitq, wqset);
2471 waitq_unlock(wqset);
2472 waitq_unlock(waitq);
2473
2474 if (link.wqlh) {
2475 waitq_link_free(WQT_PORT_SET, link);
2476 kr = KERN_SUCCESS;
2477 } else {
2478 kr = KERN_NOT_IN_SET;
2479 }
2480
2481 printf("[WQ]: \tkr=%d\n", kr);
2482 assert(kr == want);
2483 }
2484
2485 static void
2486 wqt_wakeup_one(uint32_t index, event64_t event64, kern_return_t want)
2487 {
2488 kern_return_t kr;
2489
2490 printf("[WQ]: Waking one thread on waitq [%d] event:0x%llx\n",
2491 index, event64);
2492 kr = waitq_wakeup64_one(wqt_wq(index), event64,
2493 THREAD_AWAKENED, WAITQ_ALL_PRIORITIES);
2494 printf("[WQ]: \tkr=%d\n", kr);
2495 assert(kr == want);
2496 }
2497
2498 static void
2499 wqt_clear_preposts(uint32_t idx)
2500 {
2501 waitq_lock(wqt_wq(idx));
2502 (void)waitq_clear_prepost_locked(wqt_wq(idx));
2503 waitq_unlock(wqt_wq(idx));
2504 }
2505
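/*
 * Test-side mirror of the lazy cleanup done by waitq_set_first_prepost():
 * with the set locked, move every link whose queue is no longer preposted
 * back to the regular links queue, so that wqt_expect_preposts() only
 * observes live preposts.
 */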
2506 static void
2507 wqt_preposts_gc_locked(struct waitq_set *wqset)
2508 {
2509 circle_queue_t q = &wqset->wqset_preposts;
2510 struct waitq_link *link;
2511 uint32_t ticket;
2512
2513 again:
2514 cqe_foreach_element_safe(link, q, wql_slink) {
2515 struct waitq *wq = link->wql_wq;
2516
2517 if (!waitq_lock_reserve(wq, &ticket)) {
2518 waitq_unlock(wqset);
2519 waitq_lock_wait(wq, ticket);
2520 waitq_lock(wqset);
2521 waitq_unlock(wq);
2522 /* the list was possibly mutated, restart */
2523 goto again;
2524 }
2525
2526 if (!wq->waitq_preposted) {
2527 wql_wqs_clear_preposted(link);
2528 circle_dequeue(q, &link->wql_slink);
2529 circle_enqueue_tail(&wqset->wqset_links, &link->wql_slink);
2530 }
2531
2532 waitq_unlock(wq);
2533 }
2534 }
2535
2536 static void
2537 wqt_expect_preposts(struct waitq_set *wqset, uint64_t preposts)
2538 {
2539 struct waitq_link *link;
2540 uint64_t found = 0;
2541
2542 waitq_lock(wqset);
2543
2544 wqt_preposts_gc_locked(wqset);
2545
2546 cqe_foreach_element(link, &wqset->wqset_preposts, wql_slink) {
2547 struct waitq *waitq = link->wql_wq;
2548
2549 printf("[WQ]: found prepost %d\n", wqt_idx(waitq));
2550 assertf((found & wqt_bit(waitq)) == 0,
2551 "found waitq %d twice", wqt_idx(waitq));
2552 found |= wqt_bit(waitq);
2553 }
2554
2555 waitq_unlock(wqset);
2556
2557 assertf(found == preposts, "preposts expected 0x%llx, but got 0x%llx",
2558 preposts, found);
2559 }
2560
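/*
 * Basic port/port-set linkage scenario: queues 10-12 are linked (double
 * linking is rejected), wakeups with no waiters leave 10 and 12 preposted,
 * the preposts are verified and then cleared one by one, and finally all
 * links are removed (double unlinking is rejected).
 */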
2561 static int
2562 waitq_basic_test(__unused int64_t in, int64_t *out)
2563 {
2564 struct waitq_set *wqset;
2565
2566 if (!wqt_start(__func__, out)) {
2567 return EBUSY;
2568 }
2569
2570 wqset = wqt_wqset_create();
2571 wqt_link(10, wqset, KERN_SUCCESS);
2572 wqt_link(10, wqset, KERN_ALREADY_IN_SET);
2573 wqt_link(11, wqset, KERN_SUCCESS);
2574 wqt_link(11, wqset, KERN_ALREADY_IN_SET);
2575 wqt_link(12, wqset, KERN_SUCCESS);
2576 wqt_link(12, wqset, KERN_ALREADY_IN_SET);
2577
2578 wqt_wakeup_one(10, NO_EVENT64, KERN_NOT_WAITING);
2579 wqt_wakeup_one(12, NO_EVENT64, KERN_NOT_WAITING);
2580
2581 wqt_expect_preposts(wqset, wqt_bit(10) | wqt_bit(12));
2582 wqt_clear_preposts(10);
2583
2584 wqt_expect_preposts(wqset, wqt_bit(12));
2585 wqt_clear_preposts(12);
2586
2587 wqt_expect_preposts(wqset, 0);
2588
2589 wqt_unlink(12, wqset, KERN_SUCCESS);
2590 wqt_unlink(12, wqset, KERN_NOT_IN_SET);
2591 wqt_unlink(11, wqset, KERN_SUCCESS);
2592 wqt_unlink(10, wqset, KERN_SUCCESS);
2593 wqt_wqset_free(wqset);
2594
2595 return wqt_end(__func__, out);
2596 }
2597 SYSCTL_TEST_REGISTER(waitq_basic, waitq_basic_test);
2598 #endif /* DEBUG || DEVELOPMENT */
2599