1 /*
2 * Copyright (c) 2014-2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #ifndef _WAITQ_H_
29 #define _WAITQ_H_
30 #ifdef KERNEL_PRIVATE
31
32 #include <mach/mach_types.h>
33 #include <mach/sync_policy.h>
34 #include <mach/kern_return.h> /* for kern_return_t */
35
36 #include <kern/kern_types.h> /* for wait_queue_t */
37 #include <kern/queue.h>
38 #include <kern/assert.h>
39
40 #include <sys/cdefs.h>
41
42 #ifdef XNU_KERNEL_PRIVATE
43 /* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
44 #include <kern/priority_queue.h>
45 #ifdef MACH_KERNEL_PRIVATE
46 #include <kern/spl.h>
47 #include <kern/ticket_lock.h>
48 #include <kern/circle_queue.h>
49 #include <kern/mpsc_queue.h>
50
51 #include <machine/cpu_number.h>
52 #include <machine/machine_routines.h> /* machine_timeout_suspended() */
53 #endif /* MACH_KERNEL_PRIVATE */
54 #endif /* XNU_KERNEL_PRIVATE */
55
56 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
57
58 #pragma GCC visibility push(hidden)
59
60 /*!
61 * @enum waitq_wakeup_flags_t
62 *
63 * @const WAITQ_DEFAULT
64 * Use the default behavior for wakeup.
65 *
66 * @const WAITQ_UPDATE_INHERITOR
67 * If the wait queue is a turnstile,
68 * set its inheritor to the woken up thread,
69 * or clear the inheritor if the last thread is woken up.
70 *
71 #if MACH_KERNEL_PRIVATE
72 * @const WAITQ_PROMOTE_PRIORITY (Mach IPC only)
73 * Promote the woken up thread(s) with a MINPRI_WAITQ floor,
74 * until it calls waitq_clear_promotion_locked().
75 *
76 * @const WAITQ_UNLOCK (waitq_wakeup64_*_locked only)
77 * Unlock the wait queue before any thread_go() is called for woken up threads.
78 *
79 * @const WAITQ_KEEP_LOCKED (waitq_wakeup64_*_locked only)
80 * Keep the wait queue locked for this call.
81 *
82 * @const WAITQ_HANDOFF (waitq_wakeup64_one, waitq_wakeup64_identify*)
83 * Attempt a handoff to the woken up thread.
84 #endif
85 */
__options_decl(waitq_wakeup_flags_t, uint32_t, {
	WAITQ_WAKEUP_DEFAULT = 0x0000,
	WAITQ_UPDATE_INHERITOR = 0x0001,
#if MACH_KERNEL_PRIVATE
	WAITQ_PROMOTE_PRIORITY = 0x0002,
	WAITQ_UNLOCK = 0x0004,
	/*
	 * 0 on purpose: keeping the queue locked is the default behavior,
	 * callers must pass WAITQ_UNLOCK explicitly to drop the lock.
	 */
	WAITQ_KEEP_LOCKED = 0x0000,
	WAITQ_HANDOFF = 0x0008,
#endif /* MACH_KERNEL_PRIVATE */
});
96
97 /* Opaque sizes and alignment used for struct verification */
98 #if __arm__ || __arm64__
99 #define WQ_OPAQUE_ALIGN __BIGGEST_ALIGNMENT__
100 #if __arm__
101 #define WQ_OPAQUE_SIZE 32
102 #else
103 #define WQ_OPAQUE_SIZE 40
104 #endif
105 #elif __x86_64__
106 #define WQ_OPAQUE_ALIGN 8
107 #define WQ_OPAQUE_SIZE 48
108 #else
109 #error Unknown size requirement
110 #endif
111
112 #ifdef __cplusplus
113 #define __waitq_transparent_union
114 #else
115 #define __waitq_transparent_union __attribute__((__transparent_union__))
116 #endif
117
118 /**
119 * @typedef waitq_t
120 *
121 * @brief
122 * This is an abstract typedef used to denote waitq APIs that can be called
123 * on any kind of wait queue (or wait queue set).
124 */
typedef union {
	struct waitq *wq_q;         /* any plain wait queue (WQT_QUEUE/TURNSTILE/PORT/SELECT) */
	struct waitq_set *wqs_set;  /* a port-set wait queue set (WQT_PORT_SET) */
	struct select_set *wqs_sel; /* a select wait queue set (WQT_SELECT_SET) */
} __waitq_transparent_union waitq_t;
130
131 #if !MACH_KERNEL_PRIVATE
132
133 /*
134 * The opaque waitq structure is here mostly for AIO and selinfo,
135 * but could potentially be used by other BSD subsystems.
136 */
struct waitq {
	/* storage only: size/alignment must match the kernel-private layout */
	char opaque[WQ_OPAQUE_SIZE];
} __attribute__((aligned(WQ_OPAQUE_ALIGN)));
140
#endif /* !MACH_KERNEL_PRIVATE */
142 #ifdef XNU_KERNEL_PRIVATE
143
144 /**
145 * @typedef waitq_link_t
146 *
147 * @brief
148 * Union that represents any kind of wait queue link.
149 *
150 * @discussion
151 * Unlike @c waitq_t which can be used safely on its own because
152 * @c waitq_type() can return which actual wait queue type is pointed at,
153 * @c waitq_link_t can't be used without knowing the type of wait queue
154 * (or wait queue set) it refers to.
155 */
typedef union {
	struct waitq_link_hdr *wqlh; /* header common to all link types */
	struct waitq_sellink *wqls;  /* select linkage (presumably WQT_SELECT_SET — see waitq_link_alloc) */
	struct waitq_link *wqll;     /* full waitq linkage (presumably WQT_PORT_SET — see waitq_link_alloc) */
} __waitq_transparent_union waitq_link_t;
161
162 #define WQL_NULL ((waitq_link_t){ .wqlh = NULL })
163
164 /**
165 * @typedef waitq_link_list_t
166 *
167 * @brief
168 * List of wait queue links (used for cleanup).
169 *
170 * @discussion
171 * This type is engineered so that the way it links elements is equivalent
172 * to the "forward" linking of a circle queue.
173 */
typedef struct waitq_link_list_entry {
	struct waitq_link_list_entry *next; /* same layout as a circle queue's forward link */
} waitq_link_list_t;
177
/**
 * @enum waitq_type_t
 *
 * @brief
 * List of all possible wait queue (and wait queue set) types.
 *
 * @discussion
 * (I) mark IRQ safe queues
 * (P) mark queues that prepost to sets
 * (S) mark wait queue sets
 * (keep those together to allow range checks for irq-safe/sets)
 */
__enum_decl(waitq_type_t, uint32_t, {
	WQT_INVALID = 0x0, /**< ( ) invalid type, uninitialized */
	WQT_QUEUE = 0x1, /**< (I) general wait queue */
	WQT_TURNSTILE = 0x2, /**< (I) wait queue used in @c turnstile */
	WQT_PORT = 0x3, /**< (P) wait queue used in @c ipc_port_t */
	WQT_SELECT = 0x4, /**< (P) wait queue used in @c selinfo */
	WQT_PORT_SET = 0x5, /**< (S) wait queue set used in @c ipc_pset_t */
	WQT_SELECT_SET = 0x6, /**< (S) wait queue set used for @c select() */
});
199
200 #ifdef MACH_KERNEL_PRIVATE
201 #pragma mark Mach-only types and helpers
202
203 /*
204 * The waitq needs WAITQ_FLAGS_BITS, which leaves 27 or 59 bits
205 * for the eventmask.
206 */
207 #define WAITQ_FLAGS_BITS 5
208 #define _EVENT_MASK_BITS (8 * sizeof(waitq_flags_t) - WAITQ_FLAGS_BITS)
209
210 #if __arm64__
211 typedef uint32_t waitq_flags_t;
212 #else
213 typedef unsigned long waitq_flags_t;
214 #endif
215
/* Make sure the port abuse of bits doesn't overflow the eventmask size */
/*
 * Compile-time guard: evaluates to 1 when placing __VA_ARGS__ after the
 * WAITQ_FLAGS_BITS header bits would grow the bit-field struct beyond a
 * single waitq_flags_t word, and 0 otherwise.
 */
#define WAITQ_FLAGS_OVERFLOWS(...) \
	(sizeof(struct { waitq_flags_t bits : WAITQ_FLAGS_BITS, __VA_ARGS__; }) \
	> sizeof(waitq_flags_t))

/*
 * Common flag bit-fields shared by every waitq variant; __VA_ARGS__
 * appends the per-type extra bits.
 *
 * If WAITQ_FLAGS_OVERFLOWS() reports an overflow, the `_preposted`
 * width becomes 1 - 2 = -1, a negative bit-field width, which is a
 * compile error: this acts as a static assert on the flag layout.
 */
#define WAITQ_FLAGS(prefix, ...) \
	struct { \
		waitq_type_t prefix##_type:3; \
		waitq_flags_t \
		    prefix##_fifo:1, /* fifo wakeup policy? */ \
		    prefix##_preposted:1 /* queue was preposted */ \
		    - 2 * WAITQ_FLAGS_OVERFLOWS(__VA_ARGS__), \
		__VA_ARGS__; \
	}
230
231 /*
232 * _type:
233 * the waitq type (a WQT_* value)
234 *
235 * _fifo:
236 * whether the wakeup policy is FIFO or LIFO.
237 *
238 * _preposted:
239 * o WQT_PORT: the port message queue is not empty
240 * o WQT_SELECT_SET: has the set been preposted to
241 * o others: unused
242 *
243 * _eventmask:
244 * o WQT_QUEUE: (global queues) mask events being waited on
245 * o WQT_PORT: many bits (see ipc_port_t)
246 * o WQT_PORT_SET: port_set index in its space
247 * o WQT_SELECT_SET: selset_conflict (is the conflict queue hooked)
248 * o other: unused
249 *
250 * _interlock:
251 * The lock of the waitq/waitq_set
252 *
253 * _queue/_prio_queue/_ts:
254 * o WQT_QUEUE,
255 * WQT_SELECT,
256 * WQT_PORT_SET,
257 * WQT_SELECT_SET: circle queue of waiting threads
258 * o WQT_TURNSTILE: priority queue of waiting threads
259 * o WQT_PORT: pointer to the receive turnstile of the port
260 *
261 * _links/_inheritor/_sellinks:
262 * o WQT_PORT: linkages to WQT_PORT_SET waitq sets
263 * o WQT_SELECT: linkages to WQT_SELECT_SET select sets
264 * o WQT_TURNSTILE: turnstile inheritor
265 * o WQT_PORT_SET: WQT_PORT linkages that haven't preposted
266 * o other: unused
267 */
/*
 * Common header shared by struct waitq, struct waitq_set and
 * struct select_set (field semantics are documented in the comment
 * above).  The padding array sizes the interlock up to a full
 * waitq_flags_t word so the unions below start word-aligned.
 */
#define WAITQ_HDR(prefix, ...) \
	WAITQ_FLAGS(prefix, __VA_ARGS__); \
	hw_lck_ticket_t prefix##_interlock; \
	uint8_t prefix##_padding[sizeof(waitq_flags_t) - \
	    sizeof(hw_lck_ticket_t)]; \
	union { \
		circle_queue_head_t prefix##_queue; \
		struct priority_queue_sched_max prefix##_prio_queue; \
		struct turnstile *prefix##_ts; \
	}; \
	union { \
		circle_queue_head_t prefix##_links; \
		waitq_link_list_t prefix##_sellinks; \
		void *prefix##_inheritor; \
		struct mpsc_queue_chain prefix##_defer; \
	}
284
285 /**
286 * @struct waitq
287 *
288 * @discussion
289 * This is the definition of the common event wait queue
290 * that the scheduler APIs understand. It is used
 * internally by the generalized event waiting mechanism
292 * (assert_wait), and also for items that maintain their
293 * own wait queues (such as ports and semaphores).
294 *
295 * It is not published to other kernel components.
296 *
297 * NOTE: Hardware locks are used to protect event wait
298 * queues since interrupt code is free to post events to
299 * them.
300 */
struct waitq {
	/* extra bits: mask of events being waited on (global queues), see field doc above */
	WAITQ_HDR(waitq, waitq_eventmask:_EVENT_MASK_BITS);
} __attribute__((aligned(WQ_OPAQUE_ALIGN)));
304
305 /**
306 * @struct waitq_set
307 *
308 * @brief
309 * This is the definition of a waitq set used in port-sets.
310 *
311 * @discussion
312 * The wqset_index field is used to stash the pset index for debugging
313 * purposes (not the full name as it would truncate).
314 */
struct waitq_set {
	/* extra bits: pset index in its space, stashed for debugging (see @discussion) */
	WAITQ_HDR(wqset, wqset_index:_EVENT_MASK_BITS);
	circle_queue_head_t wqset_preposts; /* list of member queues that preposted to this set */
};
319
320 /**
321 * @struct select_set
322 *
323 * @brief
324 * This is the definition of a waitq set used to back the select syscall.
325 */
struct select_set {
	/* extra bit: selset_conflict — is the conflict queue hooked (see field doc above) */
	WAITQ_HDR(selset, selset_conflict:1);
	uint64_t selset_id;
};
330
331 static inline waitq_type_t
waitq_type(waitq_t wq)332 waitq_type(waitq_t wq)
333 {
334 return wq.wq_q->waitq_type;
335 }
336
337 static inline bool
waitq_same(waitq_t wq1,waitq_t wq2)338 waitq_same(waitq_t wq1, waitq_t wq2)
339 {
340 return wq1.wq_q == wq2.wq_q;
341 }
342
343 static inline bool
waitq_is_null(waitq_t wq)344 waitq_is_null(waitq_t wq)
345 {
346 return wq.wq_q == NULL;
347 }
348 #define waitq_wait_possible(thread) waitq_is_null((thread)->waitq)
349
350 static inline bool
waitq_preposts(waitq_t wq)351 waitq_preposts(waitq_t wq)
352 {
353 switch (waitq_type(wq)) {
354 case WQT_PORT:
355 case WQT_SELECT:
356 return true;
357 default:
358 return false;
359 }
360 }
361
362 static inline bool
waitq_irq_safe(waitq_t waitq)363 waitq_irq_safe(waitq_t waitq)
364 {
365 switch (waitq_type(waitq)) {
366 case WQT_QUEUE:
367 case WQT_TURNSTILE:
368 return true;
369 default:
370 return false;
371 }
372 }
373
374 static inline bool
waitq_valid(waitq_t waitq)375 waitq_valid(waitq_t waitq)
376 {
377 return waitq.wq_q && waitq.wq_q->waitq_interlock.lck_valid;
378 }
379
/*
 * global waitqs
 */
extern struct waitq *_global_eventq(char *event, size_t event_length);
/* Convenience wrapper: passes the address and size of @c event to _global_eventq(). */
#define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))
385
386 #endif /* MACH_KERNEL_PRIVATE */
387 #pragma mark locking
388
389 /*!
390 * @function waitq_lock()
391 *
392 * @brief
393 * Lock a wait queue or wait queue set.
394 *
395 * @discussion
396 * It is the responsibility of the caller to disable
397 * interrupts if the queue is IRQ safe.
398 */
399 extern void waitq_lock(waitq_t wq);
400
401 /*!
402 * @function waitq_unlock()
403 *
404 * @brief
405 * Unlock a wait queue or wait queue set.
406 *
407 * @discussion
408 * It is the responsibility of the caller to reenable
409 * interrupts if the queue is IRQ safe.
410 */
411 extern void waitq_unlock(waitq_t wq);
412
413 /**
414 * @function waitq_is_valid()
415 *
416 * @brief
417 * Returns whether a wait queue or wait queue set has been invalidated.
418 */
419 extern bool waitq_is_valid(waitq_t wq);
420
421 #ifdef MACH_KERNEL_PRIVATE
422
423 /**
424 * @function waitq_invalidate()
425 *
426 * @brief
427 * Invalidate a waitq.
428 *
429 * @discussion
430 * It is the responsibility of the caller to make sure that:
431 * - all waiters are woken up
432 * - linkages and preposts are cleared (non IRQ Safe waitqs).
433 */
434 extern void waitq_invalidate(waitq_t wq);
435
436 /*!
437 * @function waitq_held()
438 *
439 * @brief
440 * Returns whether someone is holding the lock of the specified wait queue.
441 */
442 extern bool waitq_held(waitq_t wq) __result_use_check;
443
444 /*!
445 * @function waitq_lock_allow_invalid()
446 *
447 * @brief
448 * Lock the specified wait queue if it is valid.
449 *
450 * @discussion
451 * This function allows for the backing memory of the specified wait queue
452 * to be unmapped.
453 *
454 * Combining this with the zone allocator @c ZC_SEQUESTER feature
455 * (along with @c ZC_ZFREE_CLEARMEM and @c ZC_KASAN_NOQUARANTINE)
456 * allows to create clever schemes (See @c ipc_right_lookup_read()).
457 */
458 extern bool waitq_lock_allow_invalid(waitq_t wq) __result_use_check;
459
460 /*!
461 * @function waitq_lock_reserve()
462 *
463 * @brief
464 * Reserves the lock of the specified wait queue.
465 *
466 * @discussion
467 * Wait queue locks are "ordered" and a reservation in the lock queue
468 * can be acquired. This can be used to resolve certain lock inversions
469 * without risks for the memory backing the wait queue to disappear.
470 *
471 * See <kern/ticket_lock.h> for details.
472 *
473 * @param wq the specified wait queue
474 * @param ticket a pointer to memory to hold the reservation
475 * @returns
476 * - true if the lock was acquired
477 * - false otherwise, and @c waitq_lock_wait() @em must be called
478 * to wait for this ticket.
479 */
480 extern bool waitq_lock_reserve(waitq_t wq, uint32_t *ticket) __result_use_check;
481
482 /*!
483 * @function waitq_lock_wait()
484 *
485 * @brief
486 * Wait for a ticket acquired with @c waitq_lock_reserve().
487 */
488 extern void waitq_lock_wait(waitq_t wq, uint32_t ticket);
489
490 /*!
491 * @function waitq_lock_try()
492 *
493 * @brief
494 * Attempts to acquire the lock of the specified wait queue.
495 *
496 * @discussion
497 * Using @c waitq_lock_try() is discouraged as it leads to inefficient
498 * algorithms prone to contention.
499 *
500 * Schemes based on @c waitq_lock_reserve() / @c waitq_lock_wait() is preferred.
501 *
502 */
503 extern bool waitq_lock_try(waitq_t wq) __result_use_check;
504
505 #endif /* MACH_KERNEL_PRIVATE */
506 #pragma mark assert_wait / wakeup
507
508 /**
509 * @function waitq_assert_wait64()
510 *
511 * @brief
512 * Declare a thread's intent to wait on @c waitq for @c wait_event.
513 *
514 * @discussion
515 * @c waitq must be unlocked
516 */
517 extern wait_result_t waitq_assert_wait64(
518 waitq_t waitq,
519 event64_t wait_event,
520 wait_interrupt_t interruptible,
521 uint64_t deadline);
522
523 /**
524 * @function waitq_assert_wait64_leeway()
525 *
526 * @brief
527 * Declare a thread's intent to wait on @c waitq for @c wait_event.
528 *
529 * @discussion
530 * @c waitq must be unlocked
531 */
532 extern wait_result_t waitq_assert_wait64_leeway(
533 waitq_t waitq,
534 event64_t wait_event,
535 wait_interrupt_t interruptible,
536 wait_timeout_urgency_t urgency,
537 uint64_t deadline,
538 uint64_t leeway);
539
540 /**
541 * @function waitq_wakeup64_one()
542 *
543 * @brief
544 * Wakeup a single thread from a waitq that's waiting for a given event.
545 *
546 * @discussion
547 * @c waitq must be unlocked
548 */
549 extern kern_return_t waitq_wakeup64_one(
550 waitq_t waitq,
551 event64_t wake_event,
552 wait_result_t result,
553 waitq_wakeup_flags_t flags);
554
555 /**
 * @function waitq_wakeup64_all()
 *
 * @brief
 * Wakeup all threads from a waitq that are waiting for a given event.
 *
 * @discussion
562 * This function will set the inheritor of the wait queue
563 * to TURNSTILE_INHERITOR_NULL if it is a turnstile wait queue.
564 *
565 * @c waitq must be unlocked
566 */
567 extern kern_return_t waitq_wakeup64_all(
568 waitq_t waitq,
569 event64_t wake_event,
570 wait_result_t result,
571 waitq_wakeup_flags_t flags);
572
573 /**
574 * @function waitq_wakeup64_identify()
575 *
576 * @brief
577 * Wakeup one thread waiting on 'waitq' for 'wake_event'
578 *
579 * @discussion
580 * @c waitq must be unlocked.
581 *
582 * May temporarily disable and re-enable interrupts
583 *
584 * @returns
585 * - THREAD_NULL if no thread was waiting
586 * - a reference to a thread that was waiting on @c waitq.
587 */
588 extern thread_t waitq_wakeup64_identify(
589 waitq_t waitq,
590 event64_t wake_event,
591 wait_result_t result,
592 waitq_wakeup_flags_t flags);
593
594 /**
595 * @function waitq_wakeup64_thread()
596 *
597 * @brief
598 * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
599 *
600 * @discussion
601 * @c waitq must be unlocked and must be IRQ safe.
602 * @c thread must be unlocked
603 *
604 * May temporarily disable and re-enable interrupts
605 */
606 extern kern_return_t waitq_wakeup64_thread(
607 struct waitq *waitq,
608 event64_t wake_event,
609 thread_t thread,
610 wait_result_t result);
611
612 #pragma mark Mach-only assert_wait / wakeup
613 #ifdef MACH_KERNEL_PRIVATE
614
615 /**
616 * @function waitq_clear_promotion_locked()
617 *
618 * @brief
619 * Clear a potential thread priority promotion from a waitq wakeup
620 * with @c WAITQ_PROMOTE_PRIORITY.
621 *
622 * @discussion
623 * @c waitq must be locked.
624 *
625 * This must be called on the thread which was woken up
626 * with @c TH_SFLAG_WAITQ_PROMOTED.
627 */
628 extern void waitq_clear_promotion_locked(
629 waitq_t waitq,
630 thread_t thread);
631
632 /**
633 * @function waitq_pull_thread_locked()
634 *
635 * @brief
636 * Remove @c thread from its current blocking state on @c waitq.
637 *
638 * @discussion
639 * This function is only used by clear_wait_internal in sched_prim.c
640 * (which itself is called by the timer wakeup path and clear_wait()).
641 *
 * @c thread must be locked (the function might drop and reacquire the lock).
 *
 * @returns
 * - true if the thread has been pulled successfully.
 * - false otherwise, if the thread was no longer waiting on this waitq.
647 */
648 extern bool waitq_pull_thread_locked(
649 waitq_t waitq,
650 thread_t thread);
651
652 /**
653 * @function waitq_assert_wait64_locked()
654 *
655 * @brief
656 * Declare a thread's intent to wait on @c waitq for @c wait_event.
657 *
658 * @discussion
659 * @c waitq must be locked.
660 *
661 * Note that @c waitq might be unlocked and relocked during this call
662 * if it is a waitq set.
663 */
664 extern wait_result_t waitq_assert_wait64_locked(
665 waitq_t waitq,
666 event64_t wait_event,
667 wait_interrupt_t interruptible,
668 wait_timeout_urgency_t urgency,
669 uint64_t deadline,
670 uint64_t leeway,
671 thread_t thread);
672
673 /**
674 * @function waitq_wakeup64_all_locked()
675 *
676 * @brief
677 * Wakeup all threads waiting on @c waitq for @c wake_event
678 *
679 * @discussion
680 * @c waitq must be locked.
681 *
682 * May temporarily disable and re-enable interrupts
683 * and re-adjust thread priority of each awoken thread.
684 */
685 extern kern_return_t waitq_wakeup64_all_locked(
686 waitq_t waitq,
687 event64_t wake_event,
688 wait_result_t result,
689 waitq_wakeup_flags_t flags);
690
691 /**
692 * @function waitq_wakeup64_one_locked()
693 *
694 * @brief
695 * Wakeup one thread waiting on @c waitq for @c wake_event.
696 *
697 * @discussion
698 * @c waitq must be locked.
699 *
700 * May temporarily disable and re-enable interrupts.
701 */
702 extern kern_return_t waitq_wakeup64_one_locked(
703 waitq_t waitq,
704 event64_t wake_event,
705 wait_result_t result,
706 waitq_wakeup_flags_t flags);
707
708 /**
709 * @function waitq_wakeup64_identify_locked()
710 *
711 * @brief
712 * Wakeup one thread waiting on 'waitq' for 'wake_event'
713 *
714 * @returns
715 * A locked, runnable thread. If return value is non-NULL,
716 * interrupts have also been disabled, and the caller
717 * must call @c splx(*spl).
718 */
719 extern thread_t waitq_wakeup64_identify_locked(
720 waitq_t waitq,
721 event64_t wake_event,
722 wait_result_t result,
723 waitq_wakeup_flags_t flags,
724 spl_t *spl);
725
726 /**
727 * @function waitq_wakeup64_thread_and_unlock()
728 *
729 * @brief
730 * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
731 *
732 * @discussion
 * @c waitq must be IRQ safe and locked, unlocked on return.
734 * @c thread must be unlocked
735 */
736 extern kern_return_t waitq_wakeup64_thread_and_unlock(
737 struct waitq *waitq,
738 event64_t wake_event,
739 thread_t thread,
740 wait_result_t result);
741
742 #endif /* MACH_KERNEL_PRIVATE */
743 #pragma mark waitq links
744
745 /*!
746 * @function waitq_link_alloc()
747 *
748 * @brief
749 * Allocates a linkage object to be used with a wait queue of the specified type.
750 */
751 extern waitq_link_t waitq_link_alloc(
752 waitq_type_t type);
753
754 /*!
755 * @function waitq_link_free()
756 *
757 * @brief
758 * Frees a linkage object that was used with a wait queue of the specified type.
759 */
760 extern void waitq_link_free(
761 waitq_type_t type,
762 waitq_link_t link);
763
764 /*!
765 * @function waitq_link_free_list()
766 *
767 * @brief
768 * Frees a list of linkage object that was used with a wait queue
769 * of the specified type.
770 */
771 extern void waitq_link_free_list(
772 waitq_type_t type,
773 waitq_link_list_t *list);
774
775 #pragma mark wait queues lifecycle
776
777 /*!
778 * @function waitq_init()
779 *
780 * @brief
781 * Initializes a wait queue.
782 *
783 * @discussion
784 * @c type must be a valid type.
785 */
786 extern void waitq_init(
787 waitq_t waitq,
788 waitq_type_t type,
789 int policy);
790
791 /*!
792 * @function waitq_deinit()
793 *
794 * @brief
795 * Destroys a wait queue.
796 *
797 * @discussion
798 * @c waitq can't be a select set.
799 */
800 extern void waitq_deinit(
801 waitq_t waitq);
802
803 #pragma mark port wait queues and port set waitq sets
804 #ifdef MACH_KERNEL_PRIVATE
805
806 /**
807 * @function waitq_link_locked()
808 *
809 * @brief
810 * Link the specified port wait queue to a specified port set wait queue set.
811 *
812 * @discussion
813 * This function doesn't handle preposting/waking up the set
814 * when the wait queue is already preposted.
815 *
816 * @param waitq the port wait queue to link, must be locked.
817 * @param wqset the port set wait queue set to link, must be locked.
818 * @param link a pointer to a link allocated with
819 * @c waitq_link_alloc(WQT_PORT_SET).
820 */
821 extern kern_return_t waitq_link_locked(
822 struct waitq *waitq,
823 struct waitq_set *wqset,
824 waitq_link_t *link);
825
826 /**
827 * @function waitq_link_prepost_locked()
828 *
829 * @brief
830 * Force a given link to be preposted.
831 *
832 * @param waitq the port wait queue to link, must be locked.
833 * @param wqset the port set wait queue set to link, must be locked.
834 */
835 extern kern_return_t waitq_link_prepost_locked(
836 struct waitq *waitq,
837 struct waitq_set *wqset);
838
839 /**
840 * @function
841 * Unlinks the specified port wait queue from a specified port set wait queue set.
842 *
843 * @param waitq the port wait queue to unlink, must be locked.
844 * @param wqset the port set wait queue set to link, must be locked.
845 * @returns
846 * - @c WQL_NULL if the port wasn't a member of the set.
847 * - a link to consume with @c waitq_link_free() otherwise.
848 */
849 extern waitq_link_t waitq_unlink_locked(
850 struct waitq *waitq,
851 struct waitq_set *wqset);
852
853 /**
854 * @function waitq_unlink_all_locked()
855 *
856 * @brief
857 * Unlink the specified wait queue from all sets to which it belongs
858 *
859 * @param waitq the port wait queue to link, must be locked.
860 * @param except_wqset do not unlink this wqset.
861 * @param free_l a waitq link list to which links to free will be added.
862 * the caller must call @c waitq_link_free_list() on it.
863 */
864 extern void waitq_unlink_all_locked(
865 struct waitq *waitq,
866 struct waitq_set *except_wqset,
867 waitq_link_list_t *free_l);
868
869 /**
870 * @function waitq_set_unlink_all_locked()
871 *
872 * @brief
873 * Unlink all wait queues from this set.
874 *
875 * @discussion
876 * The @c wqset lock might be dropped and reacquired during this call.
877 *
878 * @param wqset the port-set wait queue set to unlink, must be locked.
879 * @param free_l a waitq link list to which links to free will be added.
880 * the caller must call @c waitq_link_free_list() on it.
881 */
882 extern void waitq_set_unlink_all_locked(
883 struct waitq_set *wqset,
884 waitq_link_list_t *free_l);
885
886 /**
887 * @function waitq_set_foreach_member_locked()
888 *
889 * @brief
890 * Iterate all ports members of a port-set wait queue set.
891 *
892 * @param wqset the port-set wait queue set to unlink.
893 * @param cb a block called for each port wait queue in the set.
894 * those wait queues aren't locked (and can't safely
895 * be because @c wqset is locked the whole time
896 * and this would constitute a lock inversion).
897 */
898 extern void waitq_set_foreach_member_locked(
899 struct waitq_set *wqset,
900 void (^cb)(struct waitq *));
901
902 __options_decl(wqs_prepost_flags_t, uint32_t, {
903 WQS_PREPOST_PEEK = 0x1,
904 WQS_PREPOST_LOCK = 0x2,
905 });
906
907 /**
908 * @function waitq_set_first_prepost()
909 *
910 * @brief
911 * Return the first preposted wait queue from the list of preposts of this set.
912 *
913 * @discussion
914 * The @c wqset lock might be dropped and reacquired during this call.
915 *
916 * @param wqset the port-set wait queue set to unlink, must be locked.
917 * @param flags
918 * - if @c WQS_PREPOST_LOCK is set, the returned wait queue is locked
919 * - if @c WQS_PREPOST_PEEK is set, this function assumes that no event
920 * will be dequeued and the prepost list order is unchanged,
921 * else the returned wait queue is put at the end of the prepost list.
922 */
923 struct waitq *waitq_set_first_prepost(
924 struct waitq_set *wqset,
925 wqs_prepost_flags_t flags);
926
927 /**
928 * @function waitq_clear_prepost_locked()
929 *
930 * @brief
931 * Clear all preposts originating from the specified wait queue.
932 *
933 * @discussion
934 * @c waitq must be locked.
935 *
936 * This function only lazily marks the waitq as no longer preposting,
937 * and doesn't clear the preposts for two reasons:
938 * - it avoids some lock contention by not acquiring the set locks,
939 * - it allows for ports that keep receiving messages to keep their slot
940 * in the prepost queue of sets, which improves fairness.
941 *
942 * Sets it is a member of will discover this when a thread
943 * tries to receive through it.
944 */
945 extern void waitq_clear_prepost_locked(
946 struct waitq *waitq);
947
948 /**
949 * @function ipc_pset_prepost()
950 *
951 * @brief
952 * Upcall from the waitq code to prepost to the kevent subsystem.
953 *
954 * @discussion
955 * Called with the pset and waitq locks held.
956 * (in ipc_pset.c).
957 */
958 extern void ipc_pset_prepost(
959 struct waitq_set *wqset,
960 struct waitq *waitq);
961
962 #endif /* MACH_KERNEL_PRIVATE */
963 #pragma mark select wait queues and select port set waitq sets
964
965 extern struct waitq select_conflict_queue;
966
967 /*!
968 * @function select_set_alloc()
969 *
970 * @brief
971 * Allocates a select wait queue set.
972 *
973 * @discussion
974 * select sets assume that they are only manipulated
975 * from the context of the thread they belong to.
976 */
977 extern struct select_set *select_set_alloc(void);
978
979 /*!
980 * @function select_set_free()
981 *
982 * @brief
983 * Frees a select set allocated with @c select_set_alloc().
984 */
985 extern void select_set_free(
986 struct select_set *selset);
987
988 /*!
989 * @function select_set_link()
990 *
991 * @brief
992 * Links a select wait queue into a select wait queue set.
993 *
994 * @param waitq a wait queue of type @c WQT_SELECT.
995 * @param selset a select set
996 * @param linkp a pointer to a linkage allocated
997 * with @c waitq_link_alloc(WQT_SELECT_SET),
998 * which gets niled out if the linkage is used.
999 */
1000 extern void select_set_link(
1001 struct waitq *waitq,
1002 struct select_set *selset,
1003 waitq_link_t *linkp);
1004
1005 /*!
1006 * @function select_set_reset()
1007 *
1008 * @brief
1009 * Resets a select set to prepare it for reuse.
1010 *
1011 * @discussion
1012 * This operation is lazy and will not unlink select wait queues
1013 * from the select set.
1014 */
1015 extern void select_set_reset(
1016 struct select_set *selset);
1017
1018 /*!
1019 * @function select_waitq_wakeup_and_deinit()
1020 *
1021 * @brief
1022 * Combined wakeup, unlink, and deinit under a single lock hold for select().
1023 *
1024 * @discussion
1025 * @c waitq must be a @c WQT_SELECT queue.
1026 */
1027 extern void select_waitq_wakeup_and_deinit(
1028 struct waitq *waitq,
1029 event64_t wake_event,
1030 wait_result_t result);
1031
1032 #endif /* XNU_KERNEL_PRIVATE */
1033
1034 #pragma GCC visibility pop
1035
1036 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1037
1038 #endif /* KERNEL_PRIVATE */
1039 #endif /* _WAITQ_H_ */
1040