xref: /xnu-12377.41.6/osfmk/kern/waitq.h (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2014-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _WAITQ_H_
29 #define _WAITQ_H_
30 #ifdef  KERNEL_PRIVATE
31 
32 #include <mach/mach_types.h>
33 #include <mach/sync_policy.h>
34 #include <mach/kern_return.h>           /* for kern_return_t */
35 
36 #include <kern/kern_types.h>            /* for wait_queue_t */
37 #include <kern/queue.h>
38 #include <kern/assert.h>
39 
40 #include <sys/cdefs.h>
41 
42 #ifdef XNU_KERNEL_PRIVATE
43 /* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
44 #include <kern/priority_queue.h>
45 #ifdef MACH_KERNEL_PRIVATE
46 #include <kern/spl.h>
47 #include <kern/ticket_lock.h>
48 #include <kern/circle_queue.h>
49 #include <kern/mpsc_queue.h>
50 
51 #include <machine/cpu_number.h>
52 #include <machine/machine_routines.h> /* machine_timeout_suspended() */
53 #endif /* MACH_KERNEL_PRIVATE */
54 #endif /* XNU_KERNEL_PRIVATE */
55 
56 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
57 
58 __exported_push_hidden
59 
60 /*!
61  * @enum waitq_wakeup_flags_t
62  *
63  * @const WAITQ_WAKEUP_DEFAULT
64  * Use the default behavior for wakeup.
65  *
66  * @const WAITQ_UPDATE_INHERITOR
67  * If the wait queue is a turnstile,
68  * set its inheritor to the woken up thread,
69  * or clear the inheritor if the last thread is woken up.
70  *
71  #if MACH_KERNEL_PRIVATE
72  * @const WAITQ_PROMOTE_PRIORITY (Mach IPC only)
73  * Promote the woken up thread(s) with a MINPRI_WAITQ floor,
74  * until it calls waitq_clear_promotion_locked().
75  *
76  * @const WAITQ_UNLOCK (waitq_wakeup64_*_locked only)
77  * Unlock the wait queue before any thread_go() is called for woken up threads.
78  *
79  * @const WAITQ_ENABLE_INTERRUPTS (waitq_wakeup64_*_locked only)
80  * Also enable interrupts when unlocking the wait queue.
81  *
82  * @const WAITQ_KEEP_LOCKED (waitq_wakeup64_*_locked only)
83  * Keep the wait queue locked for this call.
84  *
85  * @const WAITQ_HANDOFF (waitq_wakeup64_one, waitq_wakeup64_identify*)
86  * Attempt a handoff to the woken up thread.
87  #endif
88  */
__options_decl(waitq_wakeup_flags_t, uint32_t, {
	WAITQ_WAKEUP_DEFAULT    = 0x0000,
	WAITQ_UPDATE_INHERITOR  = 0x0001,
#if MACH_KERNEL_PRIVATE
	WAITQ_PROMOTE_PRIORITY  = 0x0002,
	WAITQ_UNLOCK            = 0x0004,
	/* deliberately 0: "keep locked" is simply the absence of WAITQ_UNLOCK */
	WAITQ_KEEP_LOCKED       = 0x0000,
	WAITQ_HANDOFF           = 0x0008,
	WAITQ_ENABLE_INTERRUPTS = 0x0010,
#endif /* MACH_KERNEL_PRIVATE */
});
100 
101 /* Opaque sizes and alignment used for struct verification */
102 #if __arm__ || __arm64__
103 	#define WQ_OPAQUE_ALIGN   __BIGGEST_ALIGNMENT__
104 	#if __arm__
105 		#define WQ_OPAQUE_SIZE   32
106 	#else
107 		#define WQ_OPAQUE_SIZE   40
108 	#endif
109 #elif __x86_64__
110 	#define WQ_OPAQUE_ALIGN   8
111 	#define WQ_OPAQUE_SIZE   48
112 #else
113 	#error Unknown size requirement
114 #endif
115 
116 #ifdef __cplusplus
117 #define __waitq_transparent_union
118 #else
119 #define __waitq_transparent_union __attribute__((__transparent_union__))
120 #endif
121 
122 /**
123  * @typedef waitq_t
124  *
125  * @brief
126  * This is an abstract typedef used to denote waitq APIs that can be called
127  * on any kind of wait queue (or wait queue set).
128  */
typedef union {
	struct waitq      *wq_q;     /* plain wait queue (any WQT_* queue type) */
	struct waitq_set  *wqs_set;  /* wait queue set backing port-sets */
	struct select_set *wqs_sel;  /* wait queue set backing select() */
} __waitq_transparent_union waitq_t;
134 
135 #if !MACH_KERNEL_PRIVATE
136 
137 /*
138  * The opaque waitq structure is here mostly for AIO and selinfo,
139  * but could potentially be used by other BSD subsystems.
140  */
struct waitq {
	/* opaque mirror of the real struct waitq; WQ_OPAQUE_SIZE/ALIGN are
	 * the values used for struct verification (see above) */
	char opaque[WQ_OPAQUE_SIZE];
} __attribute__((aligned(WQ_OPAQUE_ALIGN)));
144 
#endif /* !MACH_KERNEL_PRIVATE */
146 #ifdef XNU_KERNEL_PRIVATE
147 
148 /**
149  * @typedef waitq_link_t
150  *
151  * @brief
152  * Union that represents any kind of wait queue link.
153  *
154  * @discussion
155  * Unlike @c waitq_t which can be used safely on its own because
156  * @c waitq_type() can return which actual wait queue type is pointed at,
157  * @c waitq_link_t can't be used without knowing the type of wait queue
158  * (or wait queue set) it refers to.
159  */
typedef union {
	struct waitq_link_hdr   *wqlh;  /* header common to every link type (WQL_NULL is spelled with it) */
	struct waitq_sellink    *wqls;  /* select linkage — cf. the _sellinks lists */
	struct waitq_link       *wqll;  /* port/port-set linkage — cf. waitq_link_locked() */
} __waitq_transparent_union waitq_link_t;
165 
166 #define WQL_NULL ((waitq_link_t){ .wqlh = NULL })
167 
168 /**
169  * @typedef waitq_link_list_t
170  *
171  * @brief
172  * List of wait queue links (used for cleanup).
173  *
174  * @discussion
175  * This type is engineered so that the way it links elements is equivalent
176  * to the "forward" linking of a circle queue.
177  */
typedef struct waitq_link_list_entry {
	struct waitq_link_list_entry *next;   /* forward pointer; layout matches circle-queue forward linkage */
} waitq_link_list_t;
181 
182 /**
183  * @enum waitq_type_t
184  *
185  * @brief
186  * List of all possible wait queue (and wait queue set) types.
187  *
188  * @description
189  * (I) mark IRQ safe queues
190  * (P) mark queues that prepost to sets
191  * (S) mark wait queue sets
192  * (keep those together to allow range checks for irq-safe/sets)
193  */
__enum_decl(waitq_type_t, uint32_t, {
	WQT_INVALID     = 0x0,  /**< ( ) invalid type, uninitialized          */
	WQT_QUEUE       = 0x1,  /**< (I) general wait queue                   */
	WQT_TURNSTILE   = 0x2,  /**< (I) wait queue used in @c turnstile      */
	WQT_PORT        = 0x3,  /**< (P) wait queue used in @c ipc_port_t     */
	WQT_SELECT      = 0x4,  /**< (P) wait queue used in @c selinfo        */
	WQT_PORT_SET    = 0x5,  /**< (S) wait queue set used in @c ipc_pset_t */
	WQT_SELECT_SET  = 0x6,  /**< (S) wait queue set used for @c select()  */
});
203 
204 #ifdef MACH_KERNEL_PRIVATE
205 #pragma mark Mach-only types and helpers
206 
207 /*
208  * The waitq needs WAITQ_FLAGS_BITS, which leaves 27 or 59 bits
209  * for the eventmask.
210  */
#define WAITQ_FLAGS_BITS   5
#define _EVENT_MASK_BITS   (8 * sizeof(waitq_flags_t) - WAITQ_FLAGS_BITS)

#if __arm64__
typedef uint32_t       waitq_flags_t;
#else
typedef unsigned long  waitq_flags_t;
#endif

/*
 * Make sure the port abuse of bits doesn't overflow the eventmask size:
 * WAITQ_FLAGS_OVERFLOWS() evaluates to 1 when the __VA_ARGS__ bit-fields
 * no longer fit in a single waitq_flags_t.  In WAITQ_FLAGS() below this
 * makes the `prefix##_preposted` bit-field width negative (1 - 2), which
 * turns the overflow into a compile-time error.
 */
#define WAITQ_FLAGS_OVERFLOWS(...) \
	(sizeof(struct { waitq_flags_t bits : WAITQ_FLAGS_BITS, __VA_ARGS__; }) \
	> sizeof(waitq_flags_t))

#define WAITQ_FLAGS(prefix, ...) \
	struct {                                                               \
	    waitq_type_t prefix##_type:3;                                      \
	    waitq_flags_t                                                      \
	        prefix##_fifo:1,      /* fifo wakeup policy? */                \
	        prefix##_preposted:1  /* queue was preposted */                \
	            - 2 * WAITQ_FLAGS_OVERFLOWS(__VA_ARGS__),                  \
	        __VA_ARGS__;                                                   \
	}
234 
235 /*
236  * _type:
237  *     the waitq type (a WQT_* value)
238  *
239  * _fifo:
240  *    whether the wakeup policy is FIFO or LIFO.
241  *
242  * _preposted:
243  *     o WQT_PORT:       the port message queue is not empty
244  *     o WQT_SELECT_SET: has the set been preposted to
245  *     o others:         unused
246  *
247  * _eventmask:
248  *     o WQT_QUEUE:      (global queues) mask events being waited on
249  *     o WQT_PORT:       many bits (see ipc_port_t)
250  *     o WQT_PORT_SET:   port_set index in its space
251  *     o WQT_SELECT_SET: selset_conflict (is the conflict queue hooked)
252  *     o other:          unused
253  *
254  * _interlock:
255  *     The lock of the waitq/waitq_set
256  *
257  * _queue/_prio_queue/_ts:
258  *     o WQT_QUEUE,
259  *       WQT_SELECT,
260  *       WQT_PORT_SET,
261  *       WQT_SELECT_SET: circle queue of waiting threads
262  *     o WQT_TURNSTILE:  priority queue of waiting threads
263  *     o WQT_PORT:       pointer to the receive turnstile of the port
264  *
265  * _links/_inheritor/_sellinks:
266  *     o WQT_PORT:       linkages to WQT_PORT_SET waitq sets
267  *     o WQT_SELECT:     linkages to WQT_SELECT_SET select sets
268  *     o WQT_TURNSTILE:  turnstile inheritor
269  *     o WQT_PORT_SET:   WQT_PORT linkages that haven't preposted
270  *     o other:          unused
271  */
/*
 * Common header expanded into struct waitq / waitq_set / select_set
 * (the individual fields are documented in the comment above).
 * The `prefix##_padding` array pads the interlock slot up to
 * sizeof(waitq_flags_t).
 */
#define WAITQ_HDR(prefix, ...) \
	WAITQ_FLAGS(prefix, __VA_ARGS__);                                      \
	hw_lck_ticket_t         prefix##_interlock;                            \
	uint8_t                 prefix##_padding[sizeof(waitq_flags_t) -       \
	                                         sizeof(hw_lck_ticket_t)];     \
	union {                                                                \
	        circle_queue_head_t             prefix##_queue;                \
	        struct priority_queue_sched_max prefix##_prio_queue;           \
	        struct turnstile               *prefix##_ts;                   \
	};                                                                     \
	union {                                                                \
	        circle_queue_head_t             prefix##_links;                \
	        waitq_link_list_t               prefix##_sellinks;             \
	        void                           *prefix##_inheritor;            \
	        struct mpsc_queue_chain         prefix##_defer;                \
	}
288 
289 /**
290  *	@struct waitq
291  *
292  *	@discussion
293  *	This is the definition of the common event wait queue
294  *	that the scheduler APIs understand.  It is used
 *	internally by the generalized event waiting mechanism
296  *	(assert_wait), and also for items that maintain their
297  *	own wait queues (such as ports and semaphores).
298  *
299  *	It is not published to other kernel components.
300  *
301  *	NOTE:  Hardware locks are used to protect event wait
302  *	queues since interrupt code is free to post events to
303  *	them.
304  */
struct waitq {
	/* expands to waitq_type/_fifo/_preposted/_eventmask flags,
	 * waitq_interlock, and the queue/linkage unions (see WAITQ_HDR) */
	WAITQ_HDR(waitq, waitq_eventmask:_EVENT_MASK_BITS);
} __attribute__((aligned(WQ_OPAQUE_ALIGN)));
308 
309 /**
310  * @struct waitq_set
311  *
312  * @brief
313  * This is the definition of a waitq set used in port-sets.
314  *
315  * @discussion
316  * The wqset_index field is used to stash the pset index for debugging
317  * purposes (not the full name as it would truncate).
318  */
struct waitq_set {
	WAITQ_HDR(wqset, wqset_index:_EVENT_MASK_BITS);  /* wqset_index: pset index, debugging aid */
	circle_queue_head_t wqset_preposts;              /* member wait queues that preposted to this set */
};
323 
324 /**
325  * @struct select_set
326  *
327  * @brief
328  * This is the definition of a waitq set used to back the select syscall.
329  */
struct select_set {
	WAITQ_HDR(selset, selset_conflict:1);  /* selset_conflict: is the conflict queue hooked */
	uint64_t selset_id;
};
334 
335 static inline waitq_type_t
waitq_type(waitq_t wq)336 waitq_type(waitq_t wq)
337 {
338 	return wq.wq_q->waitq_type;
339 }
340 
341 static inline bool
waitq_same(waitq_t wq1,waitq_t wq2)342 waitq_same(waitq_t wq1, waitq_t wq2)
343 {
344 	return wq1.wq_q == wq2.wq_q;
345 }
346 
347 static inline bool
waitq_is_null(waitq_t wq)348 waitq_is_null(waitq_t wq)
349 {
350 	return wq.wq_q == NULL;
351 }
352 
353 /*!
354  * @function waitq_wait_possible()
355  *
356  * @brief
357  * Check if the thread is in a state where it could assert wait.
358  *
359  * @discussion
360  * If a thread is between assert_wait and thread block, another
361  * assert wait is not allowed.
362  */
363 extern bool waitq_wait_possible(thread_t thread);
364 
365 static inline bool
waitq_preposts(waitq_t wq)366 waitq_preposts(waitq_t wq)
367 {
368 	switch (waitq_type(wq)) {
369 	case WQT_PORT:
370 	case WQT_SELECT:
371 		return true;
372 	default:
373 		return false;
374 	}
375 }
376 
377 static inline bool
waitq_irq_safe(waitq_t waitq)378 waitq_irq_safe(waitq_t waitq)
379 {
380 	switch (waitq_type(waitq)) {
381 	case WQT_QUEUE:
382 	case WQT_TURNSTILE:
383 		return true;
384 	default:
385 		return false;
386 	}
387 }
388 
389 static inline bool
waitq_valid(waitq_t waitq)390 waitq_valid(waitq_t waitq)
391 {
392 	return waitq.wq_q && waitq.wq_q->waitq_interlock.lck_valid;
393 }
394 
395 /*
396  * global waitqs
397  */
398 extern struct waitq *_global_eventq(event64_t event) __pure2;
399 #define global_eventq(event) _global_eventq(CAST_EVENT64_T(event))
400 
401 static inline waitq_wakeup_flags_t
waitq_flags_splx(spl_t spl_level)402 waitq_flags_splx(spl_t spl_level)
403 {
404 	return spl_level ? WAITQ_ENABLE_INTERRUPTS : WAITQ_WAKEUP_DEFAULT;
405 }
406 
407 #endif  /* MACH_KERNEL_PRIVATE */
408 #pragma mark locking
409 
410 /*!
411  * @function waitq_lock()
412  *
413  * @brief
414  * Lock a wait queue or wait queue set.
415  *
416  * @discussion
417  * It is the responsibility of the caller to disable
418  * interrupts if the queue is IRQ safe.
419  */
420 extern void waitq_lock(waitq_t wq);
421 
422 /*!
423  * @function waitq_unlock()
424  *
425  * @brief
426  * Unlock a wait queue or wait queue set.
427  *
428  * @discussion
429  * It is the responsibility of the caller to reenable
430  * interrupts if the queue is IRQ safe.
431  */
432 extern void waitq_unlock(waitq_t wq);
433 
434 /**
435  * @function waitq_is_valid()
436  *
437  * @brief
438  * Returns whether a wait queue or wait queue set has been invalidated.
439  */
440 extern bool waitq_is_valid(waitq_t wq);
441 
442 #ifdef MACH_KERNEL_PRIVATE
443 
444 /**
445  * @function waitq_invalidate()
446  *
447  * @brief
448  * Invalidate a waitq.
449  *
450  * @discussion
451  * It is the responsibility of the caller to make sure that:
452  * - all waiters are woken up
453  * - linkages and preposts are cleared (non IRQ Safe waitqs).
454  */
455 extern void waitq_invalidate(waitq_t wq);
456 
457 /*!
458  * @function waitq_held()
459  *
460  * @brief
461  * Returns whether someone is holding the lock of the specified wait queue.
462  */
463 extern bool waitq_held(waitq_t wq) __result_use_check;
464 
465 /*!
466  * @function waitq_lock_allow_invalid()
467  *
468  * @brief
469  * Lock the specified wait queue if it is valid.
470  *
471  * @discussion
472  * This function allows for the backing memory of the specified wait queue
473  * to be unmapped.
474  *
475  * Combining this with the zone allocator @c ZC_SEQUESTER feature
476  * (along with @c ZC_ZFREE_CLEARMEM) allows to create clever schemes
477  * (See @c ipc_right_lookup_read()).
478  */
479 extern bool waitq_lock_allow_invalid(waitq_t wq) __result_use_check;
480 
481 /*!
482  * @function waitq_lock_reserve()
483  *
484  * @brief
485  * Reserves the lock of the specified wait queue.
486  *
487  * @discussion
488  * Wait queue locks are "ordered" and a reservation in the lock queue
489  * can be acquired. This can be used to resolve certain lock inversions
490  * without risks for the memory backing the wait queue to disappear.
491  *
492  * See <kern/ticket_lock.h> for details.
493  *
494  * @param wq            the specified wait queue
495  * @param ticket        a pointer to memory to hold the reservation
496  * @returns
497  *     - true if the lock was acquired
498  *     - false otherwise, and @c waitq_lock_wait() @em must be called
499  *       to wait for this ticket.
500  */
501 extern bool waitq_lock_reserve(waitq_t wq, uint32_t *ticket) __result_use_check;
502 
503 /*!
504  * @function waitq_lock_wait()
505  *
506  * @brief
507  * Wait for a ticket acquired with @c waitq_lock_reserve().
508  */
509 extern void waitq_lock_wait(waitq_t wq, uint32_t ticket);
510 
511 /*!
512  * @function waitq_lock_try()
513  *
514  * @brief
515  * Attempts to acquire the lock of the specified wait queue.
516  *
517  * @discussion
518  * Using @c waitq_lock_try() is discouraged as it leads to inefficient
519  * algorithms prone to contention.
520  *
 * Schemes based on @c waitq_lock_reserve() / @c waitq_lock_wait() are preferred.
522  *
523  */
524 extern bool waitq_lock_try(waitq_t wq) __result_use_check;
525 
526 #endif /* MACH_KERNEL_PRIVATE */
527 #pragma mark assert_wait / wakeup
528 
529 /**
530  * @function waitq_assert_wait64()
531  *
532  * @brief
533  * Declare a thread's intent to wait on @c waitq for @c wait_event.
534  *
535  * @discussion
536  * @c waitq must be unlocked
537  */
538 extern wait_result_t waitq_assert_wait64(
539 	waitq_t                 waitq,
540 	event64_t               wait_event,
541 	wait_interrupt_t        interruptible,
542 	uint64_t                deadline);
543 
544 /**
545  * @function waitq_assert_wait64_leeway()
546  *
547  * @brief
548  * Declare a thread's intent to wait on @c waitq for @c wait_event.
549  *
550  * @discussion
551  * @c waitq must be unlocked
552  */
553 extern wait_result_t waitq_assert_wait64_leeway(
554 	waitq_t                 waitq,
555 	event64_t               wait_event,
556 	wait_interrupt_t        interruptible,
557 	wait_timeout_urgency_t  urgency,
558 	uint64_t                deadline,
559 	uint64_t                leeway);
560 
561 /**
562  * @function waitq_wakeup64_one()
563  *
564  * @brief
565  * Wakeup a single thread from a waitq that's waiting for a given event.
566  *
567  * @discussion
568  * @c waitq must be unlocked
569  */
570 extern kern_return_t waitq_wakeup64_one(
571 	waitq_t                 waitq,
572 	event64_t               wake_event,
573 	wait_result_t           result,
574 	waitq_wakeup_flags_t    flags);
575 
576 /**
 * @function waitq_wakeup64_nthreads()
578  *
579  * @brief
580  * Wakeup up to nthreads threads from a waitq
581  * that are waiting for a given event.
582  *
583  * @description
584  * This function will set the inheritor of the wait queue
585  * to TURNSTILE_INHERITOR_NULL if it is a turnstile wait queue.
586  *
587  * @c waitq must be unlocked
588  *
589  * @returns how many threads have been woken up
590  */
591 extern uint32_t waitq_wakeup64_nthreads(
592 	waitq_t                 waitq,
593 	event64_t               wake_event,
594 	wait_result_t           result,
595 	waitq_wakeup_flags_t    flags,
596 	uint32_t                nthreads);
597 
598 /**
 * @function waitq_wakeup64_all()
600  *
601  * @brief
602  * Wakeup all threads from a waitq that are waiting for a given event.
603  *
604  * @description
605  * This function will set the inheritor of the wait queue
606  * to TURNSTILE_INHERITOR_NULL if it is a turnstile wait queue.
607  *
608  * @c waitq must be unlocked
609  */
610 extern kern_return_t waitq_wakeup64_all(
611 	waitq_t                 waitq,
612 	event64_t               wake_event,
613 	wait_result_t           result,
614 	waitq_wakeup_flags_t    flags);
615 
616 /**
617  * @function waitq_wakeup64_identify()
618  *
619  * @brief
620  * Wakeup one thread waiting on 'waitq' for 'wake_event'
621  *
622  * @discussion
623  * @c waitq must be unlocked.
624  *
625  * May temporarily disable and re-enable interrupts
626  *
627  * @returns
628  *     - THREAD_NULL if no thread was waiting
629  *     - a reference to a thread that was waiting on @c waitq.
630  */
631 extern thread_t waitq_wakeup64_identify(
632 	waitq_t                 waitq,
633 	event64_t               wake_event,
634 	wait_result_t           result,
635 	waitq_wakeup_flags_t    flags);
636 
637 /**
638  * @function waitq_wakeup64_thread()
639  *
640  * @brief
641  * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
642  *
643  * @discussion
644  * @c waitq must be unlocked and must be IRQ safe.
645  * @c thread must be unlocked
646  *
647  * May temporarily disable and re-enable interrupts
648  */
649 extern kern_return_t waitq_wakeup64_thread(
650 	struct waitq           *waitq,
651 	event64_t               wake_event,
652 	thread_t                thread,
653 	wait_result_t           result);
654 
655 #pragma mark Mach-only assert_wait / wakeup
656 #ifdef MACH_KERNEL_PRIVATE
657 
658 /**
659  * @function waitq_clear_promotion_locked()
660  *
661  * @brief
662  * Clear a potential thread priority promotion from a waitq wakeup
663  * with @c WAITQ_PROMOTE_PRIORITY.
664  *
665  * @discussion
666  * @c waitq must be locked.
667  *
668  * This must be called on the thread which was woken up
669  * with @c TH_SFLAG_WAITQ_PROMOTED.
670  */
671 extern void waitq_clear_promotion_locked(
672 	waitq_t                 waitq,
673 	thread_t                thread);
674 
675 /**
676  * @function waitq_pull_thread_locked()
677  *
678  * @brief
679  * Remove @c thread from its current blocking state on @c waitq.
680  *
681  * @discussion
682  * This function is only used by clear_wait_internal in sched_prim.c
683  * (which itself is called by the timer wakeup path and clear_wait()).
684  *
 * @c thread must be locked (the function might drop and reacquire the lock).
686  *
687  * @returns
 *     - true if the thread has been pulled successfully.
689  *     - false otherwise, if the thread was no longer waiting on this waitq.
690  */
691 extern bool waitq_pull_thread_locked(
692 	waitq_t                 waitq,
693 	thread_t                thread);
694 
695 /**
696  * @function waitq_assert_wait64_locked()
697  *
698  * @brief
699  * Declare a thread's intent to wait on @c waitq for @c wait_event.
700  *
701  * @discussion
702  * @c waitq must be locked.
703  *
704  * Note that @c waitq might be unlocked and relocked during this call
705  * if it is a waitq set.
706  */
707 extern wait_result_t waitq_assert_wait64_locked(
708 	waitq_t                 waitq,
709 	event64_t               wait_event,
710 	wait_interrupt_t        interruptible,
711 	wait_timeout_urgency_t  urgency,
712 	uint64_t                deadline,
713 	uint64_t                leeway,
714 	thread_t                thread);
715 
716 /**
717  * @function waitq_wakeup64_all_locked()
718  *
719  * @brief
720  * Wakeup all threads waiting on @c waitq for @c wake_event
721  *
722  * @discussion
723  * @c waitq must be locked.
724  *
725  * May temporarily disable and re-enable interrupts
726  * and re-adjust thread priority of each awoken thread.
727  */
728 extern kern_return_t waitq_wakeup64_all_locked(
729 	waitq_t                 waitq,
730 	event64_t               wake_event,
731 	wait_result_t           result,
732 	waitq_wakeup_flags_t    flags);
733 
734 /**
735  * @function waitq_wakeup64_nthreads_locked()
736  *
737  * @brief
738  * Wakeup up to nthreads threads waiting on @c waitq for @c wake_event.
739  *
740  * @discussion
741  * @c waitq must be locked.
742  *
743  * May temporarily disable and re-enable interrupts.
744  *
745  * @returns how many threads have been woken up
746  */
747 extern uint32_t waitq_wakeup64_nthreads_locked(
748 	waitq_t                 waitq,
749 	event64_t               wake_event,
750 	wait_result_t           result,
751 	waitq_wakeup_flags_t    flags,
752 	uint32_t                nthreads);
753 
754 /**
755  * @function waitq_wakeup64_one_locked()
756  *
757  * @brief
758  * Wakeup one thread waiting on @c waitq for @c wake_event.
759  *
760  * @discussion
761  * @c waitq must be locked.
762  *
763  * May temporarily disable and re-enable interrupts.
764  */
765 extern kern_return_t waitq_wakeup64_one_locked(
766 	waitq_t                 waitq,
767 	event64_t               wake_event,
768 	wait_result_t           result,
769 	waitq_wakeup_flags_t    flags);
770 
771 /**
772  * @function waitq_wakeup64_identify_locked()
773  *
774  * @brief
775  * Wakeup one thread waiting on 'waitq' for 'wake_event'
776  *
777  * @returns
778  *     Returns a thread that is pulled from waitq but not set runnable yet.
779  *     Must be paired with waitq_resume_identified_thread to set it runnable -
780  *     between these two points preemption is disabled.
781  */
782 extern thread_t waitq_wakeup64_identify_locked(
783 	waitq_t                 waitq,
784 	event64_t               wake_event,
785 	waitq_wakeup_flags_t    flags);
786 
787 /**
788  * @function waitq_resume_identified_thread()
789  *
790  * @brief
791  * Set a thread runnable that has been woken with waitq_wakeup64_identify_locked
792  */
793 extern void waitq_resume_identified_thread(
794 	waitq_t                 waitq,
795 	thread_t                thread,
796 	wait_result_t           result,
797 	waitq_wakeup_flags_t    flags);
798 
799 /**
800  * @function waitq_resume_and_bind_identified_thread()
801  *
802  * @brief
803  * Set a thread runnable that has been woken with
804  * waitq_wakeup64_identify_locked, and bind it to a processor at the same time.
805  */
806 extern void waitq_resume_and_bind_identified_thread(
807 	waitq_t                 waitq,
808 	thread_t                thread,
809 	processor_t             processor,
810 	wait_result_t           result,
811 	waitq_wakeup_flags_t    flags);
812 
813 /**
814  * @function waitq_wakeup64_thread_and_unlock()
815  *
816  * @brief
817  * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
818  *
819  * @discussion
820  * @c waitq must IRQ safe and locked, unlocked on return.
821  * @c thread must be unlocked
822  */
823 extern kern_return_t waitq_wakeup64_thread_and_unlock(
824 	struct waitq           *waitq,
825 	event64_t               wake_event,
826 	thread_t                thread,
827 	wait_result_t           result);
828 
829 #endif /* MACH_KERNEL_PRIVATE */
830 #pragma mark waitq links
831 
832 /*!
833  * @function waitq_link_alloc()
834  *
835  * @brief
836  * Allocates a linkage object to be used with a wait queue of the specified type.
837  */
838 extern waitq_link_t waitq_link_alloc(
839 	waitq_type_t            type);
840 
841 /*!
842  * @function waitq_link_free()
843  *
844  * @brief
845  * Frees a linkage object that was used with a wait queue of the specified type.
846  */
847 extern void waitq_link_free(
848 	waitq_type_t            type,
849 	waitq_link_t            link);
850 
851 /*!
852  * @function waitq_link_free_list()
853  *
854  * @brief
855  * Frees a list of linkage object that was used with a wait queue
856  * of the specified type.
857  */
858 extern void waitq_link_free_list(
859 	waitq_type_t            type,
860 	waitq_link_list_t      *list);
861 
862 #pragma mark wait queues lifecycle
863 
864 /*!
865  * @function waitq_init()
866  *
867  * @brief
868  * Initializes a wait queue.
869  *
870  * @discussion
871  * @c type must be a valid type.
872  */
873 extern void waitq_init(
874 	waitq_t                 waitq,
875 	waitq_type_t            type,
876 	int                     policy);
877 
878 /*!
879  * @function waitq_deinit()
880  *
881  * @brief
882  * Destroys a wait queue.
883  *
884  * @discussion
885  * @c waitq can't be a select set.
886  */
887 extern void waitq_deinit(
888 	waitq_t                 waitq);
889 
890 #pragma mark port wait queues and port set waitq sets
891 #ifdef MACH_KERNEL_PRIVATE
892 
893 /**
894  * @function waitq_link_locked()
895  *
896  * @brief
897  * Link the specified port wait queue to a specified port set wait queue set.
898  *
899  * @discussion
900  * This function doesn't handle preposting/waking up the set
901  * when the wait queue is already preposted.
902  *
903  * @param waitq         the port wait queue to link, must be locked.
904  * @param wqset         the port set wait queue set to link, must be locked.
905  * @param link          a pointer to a link allocated with
906  *                      @c waitq_link_alloc(WQT_PORT_SET).
907  */
908 extern kern_return_t waitq_link_locked(
909 	struct waitq           *waitq,
910 	struct waitq_set       *wqset,
911 	waitq_link_t           *link);
912 
913 /**
914  * @function waitq_link_prepost_locked()
915  *
916  * @brief
917  * Force a given link to be preposted.
918  *
919  * @param waitq         the port wait queue to link, must be locked.
920  * @param wqset         the port set wait queue set to link, must be locked.
921  */
922 extern kern_return_t waitq_link_prepost_locked(
923 	struct waitq           *waitq,
924 	struct waitq_set       *wqset);
925 
926 /**
927  * @function
928  * Unlinks the specified port wait queue from a specified port set wait queue set.
929  *
930  * @param waitq         the port wait queue to unlink, must be locked.
931  * @param wqset         the port set wait queue set to link, must be locked.
932  * @returns
933  *     - @c WQL_NULL if the port wasn't a member of the set.
934  *     - a link to consume with @c waitq_link_free() otherwise.
935  */
936 extern waitq_link_t waitq_unlink_locked(
937 	struct waitq           *waitq,
938 	struct waitq_set       *wqset);
939 
940 /**
941  * @function waitq_unlink_all_locked()
942  *
943  * @brief
944  * Unlink the specified wait queue from all sets to which it belongs
945  *
946  * @param waitq         the port wait queue to link, must be locked.
947  * @param except_wqset  do not unlink this wqset.
948  * @param free_l        a waitq link list to which links to free will be added.
949  *                      the caller must call @c waitq_link_free_list() on it.
950  */
951 extern void waitq_unlink_all_locked(
952 	struct waitq           *waitq,
953 	struct waitq_set       *except_wqset,
954 	waitq_link_list_t      *free_l);
955 
956 /**
957  * @function waitq_set_unlink_all_locked()
958  *
959  * @brief
960  * Unlink all wait queues from this set.
961  *
962  * @discussion
963  * The @c wqset lock might be dropped and reacquired during this call.
964  *
965  * @param wqset         the port-set wait queue set to unlink, must be locked.
966  * @param free_l        a waitq link list to which links to free will be added.
967  *                      the caller must call @c waitq_link_free_list() on it.
968  */
969 extern void waitq_set_unlink_all_locked(
970 	struct waitq_set       *wqset,
971 	waitq_link_list_t      *free_l);
972 
973 /**
974  * @function waitq_set_foreach_member_locked()
975  *
976  * @brief
977  * Iterate all ports members of a port-set wait queue set.
978  *
979  * @param wqset         the port-set wait queue set to unlink.
980  * @param cb            a block called for each port wait queue in the set.
981  *                      those wait queues aren't locked (and can't safely
982  *                      be because @c wqset is locked the whole time
983  *                      and this would constitute a lock inversion).
984  */
985 extern void waitq_set_foreach_member_locked(
986 	struct waitq_set       *wqset,
987 	void                  (^cb)(struct waitq *));
988 
989 __options_decl(wqs_prepost_flags_t, uint32_t, {
990 	WQS_PREPOST_PEEK = 0x1,
991 	WQS_PREPOST_LOCK = 0x2,
992 });
993 
994 /**
995  * @function waitq_set_first_prepost()
996  *
997  * @brief
998  * Return the first preposted wait queue from the list of preposts of this set.
999  *
1000  * @discussion
1001  * The @c wqset lock might be dropped and reacquired during this call.
1002  *
1003  * @param wqset         the port-set wait queue set to unlink, must be locked.
1004  * @param flags
1005  *     - if @c WQS_PREPOST_LOCK is set, the returned wait queue is locked
1006  *     - if @c WQS_PREPOST_PEEK is set, this function assumes that no event
1007  *       will be dequeued and the prepost list order is unchanged,
1008  *       else the returned wait queue is put at the end of the prepost list.
1009  */
1010 struct waitq *waitq_set_first_prepost(
1011 	struct waitq_set       *wqset,
1012 	wqs_prepost_flags_t    flags);
1013 
1014 /**
1015  * @function waitq_clear_prepost_locked()
1016  *
1017  * @brief
1018  * Clear all preposts originating from the specified wait queue.
1019  *
1020  * @discussion
1021  * @c waitq must be locked.
1022  *
1023  * This function only lazily marks the waitq as no longer preposting,
1024  * and doesn't clear the preposts for two reasons:
1025  * - it avoids some lock contention by not acquiring the set locks,
1026  * - it allows for ports that keep receiving messages to keep their slot
1027  *   in the prepost queue of sets, which improves fairness.
1028  *
1029  * Sets it is a member of will discover this when a thread
1030  * tries to receive through it.
1031  */
1032 extern void waitq_clear_prepost_locked(
1033 	struct waitq           *waitq);
1034 
1035 /**
1036  * @function ipc_pset_prepost()
1037  *
1038  * @brief
1039  * Upcall from the waitq code to prepost to the kevent subsystem.
1040  *
1041  * @discussion
1042  * Called with the pset and waitq locks held.
1043  * (in ipc_pset.c).
1044  */
1045 extern void ipc_pset_prepost(
1046 	struct waitq_set       *wqset,
1047 	struct waitq           *waitq);
1048 
1049 #endif /* MACH_KERNEL_PRIVATE */
1050 #pragma mark select wait queues and select port set waitq sets
1051 
/*!
 * Global wait queue used by the select() subsystem.
 * NOTE(review): presumably a shared fallback queue used when a wait queue
 * cannot be linked into a select set — confirm against the callers in bsd/.
 */
extern struct waitq select_conflict_queue;
1053 
1054 /*!
1055  * @function select_set_alloc()
1056  *
1057  * @brief
1058  * Allocates a select wait queue set.
1059  *
1060  * @discussion
1061  * select sets assume that they are only manipulated
1062  * from the context of the thread they belong to.
1063  */
1064 extern struct select_set *select_set_alloc(void);
1065 
1066 /*!
1067  * @function select_set_free()
1068  *
1069  * @brief
1070  * Frees a select set allocated with @c select_set_alloc().
1071  */
1072 extern void select_set_free(
1073 	struct select_set      *selset);
1074 
1075 /*!
1076  * @function select_set_link()
1077  *
1078  * @brief
1079  * Links a select wait queue into a select wait queue set.
1080  *
1081  * @param waitq       a wait queue of type @c WQT_SELECT.
1082  * @param selset      a select set
1083  * @param linkp       a pointer to a linkage allocated
1084  *                    with @c waitq_link_alloc(WQT_SELECT_SET),
1085  *                    which gets niled out if the linkage is used.
1086  */
1087 extern void select_set_link(
1088 	struct waitq           *waitq,
1089 	struct select_set      *selset,
1090 	waitq_link_t           *linkp);
1091 
1092 /*!
1093  * @function select_set_reset()
1094  *
1095  * @brief
1096  * Resets a select set to prepare it for reuse.
1097  *
1098  * @discussion
1099  * This operation is lazy and will not unlink select wait queues
1100  * from the select set.
1101  */
1102 extern void select_set_reset(
1103 	struct select_set      *selset);
1104 
1105 /*!
1106  * @function select_waitq_wakeup_and_deinit()
1107  *
1108  * @brief
1109  * Combined wakeup, unlink, and deinit under a single lock hold for select().
1110  *
1111  * @discussion
1112  * @c waitq must be a @c WQT_SELECT queue.
1113  */
1114 extern void select_waitq_wakeup_and_deinit(
1115 	struct waitq           *waitq,
1116 	event64_t               wake_event,
1117 	wait_result_t           result);
1118 
1119 #endif /* XNU_KERNEL_PRIVATE */
1120 
1121 __exported_pop
1122 
1123 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1124 
1125 #endif  /* KERNEL_PRIVATE */
1126 #endif  /* _WAITQ_H_ */
1127