xref: /xnu-8020.101.4/osfmk/kern/waitq.h (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2014-2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 #ifndef _WAITQ_H_
29 #define _WAITQ_H_
30 #ifdef  KERNEL_PRIVATE
31 
32 #include <mach/mach_types.h>
33 #include <mach/sync_policy.h>
34 #include <mach/kern_return.h>           /* for kern_return_t */
35 
36 #include <kern/kern_types.h>            /* for wait_queue_t */
37 #include <kern/queue.h>
38 #include <kern/assert.h>
39 
40 #include <sys/cdefs.h>
41 
42 #ifdef XNU_KERNEL_PRIVATE
43 /* priority queue static asserts fail for __ARM64_ARCH_8_32__ kext builds */
44 #include <kern/priority_queue.h>
45 #ifdef MACH_KERNEL_PRIVATE
46 #include <kern/spl.h>
47 #include <kern/ticket_lock.h>
48 #include <kern/circle_queue.h>
49 #include <kern/mpsc_queue.h>
50 
51 #include <machine/cpu_number.h>
52 #include <machine/machine_routines.h> /* machine_timeout_suspended() */
53 #endif /* MACH_KERNEL_PRIVATE */
54 #endif /* XNU_KERNEL_PRIVATE */
55 
56 __BEGIN_DECLS __ASSUME_PTR_ABI_SINGLE_BEGIN
57 
58 #pragma GCC visibility push(hidden)
59 
60 /*
61  * Constants and types used in the waitq APIs
62  */
63 #define WAITQ_ALL_PRIORITIES   (-1)
64 #define WAITQ_PROMOTE_PRIORITY (-2)
65 #define WAITQ_PROMOTE_ON_WAKE  (-3)
66 
67 /* Opaque sizes and alignment used for struct verification */
68 #if __arm__ || __arm64__
69 	#define WQ_OPAQUE_ALIGN   __BIGGEST_ALIGNMENT__
70 	#if __arm__
71 		#define WQ_OPAQUE_SIZE   32
72 	#else
73 		#define WQ_OPAQUE_SIZE   40
74 	#endif
75 #elif __x86_64__
76 	#define WQ_OPAQUE_ALIGN   8
77 	#define WQ_OPAQUE_SIZE   48
78 #else
79 	#error Unknown size requirement
80 #endif
81 
82 #ifdef __cplusplus
83 #define __waitq_transparent_union
84 #else
85 #define __waitq_transparent_union __attribute__((__transparent_union__))
86 #endif
87 
88 /**
89  * @typedef waitq_t
90  *
91  * @brief
92  * This is an abstract typedef used to denote waitq APIs that can be called
93  * on any kind of wait queue (or wait queue set).
94  */
95 typedef union {
96 	struct waitq      *wq_q;
97 	struct waitq_set  *wqs_set;
98 	struct select_set *wqs_sel;
99 } __waitq_transparent_union waitq_t;
100 
101 #if !MACH_KERNEL_PRIVATE
102 
103 /*
104  * The opaque waitq structure is here mostly for AIO and selinfo,
105  * but could potentially be used by other BSD subsystems.
106  */
107 struct waitq {
108 	char opaque[WQ_OPAQUE_SIZE];
109 } __attribute__((aligned(WQ_OPAQUE_ALIGN)));
110 
#endif /* !MACH_KERNEL_PRIVATE */
112 #ifdef XNU_KERNEL_PRIVATE
113 
114 /**
115  * @typedef waitq_link_t
116  *
117  * @brief
118  * Union that represents any kind of wait queue link.
119  *
120  * @discussion
121  * Unlike @c waitq_t which can be used safely on its own because
122  * @c waitq_type() can return which actual wait queue type is pointed at,
123  * @c waitq_link_t can't be used without knowing the type of wait queue
124  * (or wait queue set) it refers to.
125  */
126 typedef union {
127 	struct waitq_link_hdr   *wqlh;
128 	struct waitq_sellink    *wqls;
129 	struct waitq_link       *wqll;
130 } __waitq_transparent_union waitq_link_t;
131 
132 #define WQL_NULL ((waitq_link_t){ .wqlh = NULL })
133 
134 /**
135  * @typedef waitq_link_list_t
136  *
137  * @brief
138  * List of wait queue links (used for cleanup).
139  *
140  * @discussion
141  * This type is engineered so that the way it links elements is equivalent
142  * to the "forward" linking of a circle queue.
143  */
144 typedef struct waitq_link_list_entry {
145 	struct waitq_link_list_entry *next;
146 } waitq_link_list_t;
147 
148 /**
149  * @enum waitq_type_t
150  *
151  * @brief
152  * List of all possible wait queue (and wait queue set) types.
153  *
154  * @description
155  * (I) mark IRQ safe queues
156  * (P) mark queues that prepost to sets
157  * (S) mark wait queue sets
158  * (keep those together to allow range checks for irq-safe/sets)
159  */
__enum_decl(waitq_type_t, uint32_t, {
	WQT_INVALID     = 0x0,  /**< ( ) invalid type, uninitialized          */
	WQT_QUEUE       = 0x1,  /**< (I) general wait queue                   */
	WQT_TURNSTILE   = 0x2,  /**< (I) wait queue used in @c turnstile      */
	WQT_PORT        = 0x3,  /**< (P) wait queue used in @c ipc_port_t     */
	WQT_SELECT      = 0x4,  /**< (P) wait queue used in @c selinfo        */
	WQT_PORT_SET    = 0x5,  /**< (S) wait queue set used in @c ipc_pset_t */
	WQT_SELECT_SET  = 0x6,  /**< (S) wait queue set used for @c select()  */
});
169 
170 #ifdef MACH_KERNEL_PRIVATE
171 #pragma mark Mach-only types and helpers
172 
173 /*
174  * The waitq needs WAITQ_FLAGS_BITS, which leaves 27 or 59 bits
175  * for the eventmask.
176  */
177 #define WAITQ_FLAGS_BITS   5
178 #define _EVENT_MASK_BITS   (8 * sizeof(waitq_flags_t) - WAITQ_FLAGS_BITS)
179 
180 #if __arm64__
181 typedef uint32_t       waitq_flags_t;
182 #else
183 typedef unsigned long  waitq_flags_t;
184 #endif
185 
/* Make sure the port abuse of bits doesn't overflow the eventmask size */
187 #define WAITQ_FLAGS_OVERFLOWS(...) \
188 	(sizeof(struct { waitq_flags_t bits : WAITQ_FLAGS_BITS, __VA_ARGS__; }) \
189 	> sizeof(waitq_flags_t))
190 
191 #define WAITQ_FLAGS(prefix, ...) \
192 	struct {                                                               \
193 	    waitq_type_t prefix##_type:3;                                      \
194 	    waitq_flags_t                                                      \
195 	        prefix##_fifo:1,      /* fifo wakeup policy? */                \
196 	        prefix##_preposted:1  /* queue was preposted */                \
197 	            - 2 * WAITQ_FLAGS_OVERFLOWS(__VA_ARGS__),                  \
198 	        __VA_ARGS__;                                                   \
199 	}
200 
201 /*
202  * _type:
203  *     the waitq type (a WQT_* value)
204  *
205  * _fifo:
206  *    whether the wakeup policy is FIFO or LIFO.
207  *
208  * _preposted:
209  *     o WQT_PORT:       the port message queue is not empty
210  *     o WQT_SELECT_SET: has the set been preposted to
211  *     o others:         unused
212  *
213  * _eventmask:
214  *     o WQT_QUEUE:      (global queues) mask events being waited on
215  *     o WQT_PORT:       many bits (see ipc_port_t)
216  *     o WQT_PORT_SET:   port_set index in its space
217  *     o WQT_SELECT_SET: selset_conflict (is the conflict queue hooked)
218  *     o other:          unused
219  *
220  * _interlock:
221  *     The lock of the waitq/waitq_set
222  *
223  * _queue/_prio_queue/_ts:
224  *     o WQT_QUEUE,
225  *       WQT_SELECT,
226  *       WQT_PORT_SET,
227  *       WQT_SELECT_SET: circle queue of waiting threads
228  *     o WQT_TURNSTILE:  priority queue of waiting threads
229  *     o WQT_PORT:       pointer to the receive turnstile of the port
230  *
231  * _links/_inheritor/_sellinks:
232  *     o WQT_PORT:       linkages to WQT_PORT_SET waitq sets
233  *     o WQT_SELECT:     linkages to WQT_SELECT_SET select sets
234  *     o WQT_TURNSTILE:  turnstile inheritor
235  *     o WQT_PORT_SET:   WQT_PORT linkages that haven't preposted
236  *     o other:          unused
237  */
238 #define WAITQ_HDR(prefix, ...) \
239 	WAITQ_FLAGS(prefix, __VA_ARGS__);                                      \
240 	hw_lck_ticket_t         prefix##_interlock;                            \
241 	uint8_t                 prefix##_padding[sizeof(waitq_flags_t) -       \
242 	                                         sizeof(hw_lck_ticket_t)];     \
243 	union {                                                                \
244 	        circle_queue_head_t             prefix##_queue;                \
245 	        struct priority_queue_sched_max prefix##_prio_queue;           \
246 	        struct turnstile               *prefix##_ts;                   \
247 	};                                                                     \
248 	union {                                                                \
249 	        circle_queue_head_t             prefix##_links;                \
250 	        waitq_link_list_t               prefix##_sellinks;             \
251 	        void                           *prefix##_inheritor;            \
252 	        struct mpsc_queue_chain         prefix##_defer;                \
253 	}
254 
255 /**
256  *	@struct waitq
257  *
258  *	@discussion
259  *	This is the definition of the common event wait queue
260  *	that the scheduler APIs understand.  It is used
 * internally by the generalized event waiting mechanism
262  *	(assert_wait), and also for items that maintain their
263  *	own wait queues (such as ports and semaphores).
264  *
265  *	It is not published to other kernel components.
266  *
267  *	NOTE:  Hardware locks are used to protect event wait
268  *	queues since interrupt code is free to post events to
269  *	them.
270  */
271 struct waitq {
272 	WAITQ_HDR(waitq, waitq_eventmask:_EVENT_MASK_BITS);
273 } __attribute__((aligned(WQ_OPAQUE_ALIGN)));
274 
275 /**
276  * @struct waitq_set
277  *
278  * @brief
279  * This is the definition of a waitq set used in port-sets.
280  *
281  * @discussion
282  * The wqset_index field is used to stash the pset index for debugging
283  * purposes (not the full name as it would truncate).
284  */
285 struct waitq_set {
286 	WAITQ_HDR(wqset, wqset_index:_EVENT_MASK_BITS);
287 	circle_queue_head_t wqset_preposts;
288 };
289 
290 /**
291  * @struct select_set
292  *
293  * @brief
294  * This is the definition of a waitq set used to back the select syscall.
295  */
296 struct select_set {
297 	WAITQ_HDR(selset, selset_conflict:1);
298 	uint64_t selset_id;
299 };
300 
301 static inline waitq_type_t
waitq_type(waitq_t wq)302 waitq_type(waitq_t wq)
303 {
304 	return wq.wq_q->waitq_type;
305 }
306 
307 static inline bool
waitq_same(waitq_t wq1,waitq_t wq2)308 waitq_same(waitq_t wq1, waitq_t wq2)
309 {
310 	return wq1.wq_q == wq2.wq_q;
311 }
312 
313 static inline bool
waitq_is_null(waitq_t wq)314 waitq_is_null(waitq_t wq)
315 {
316 	return wq.wq_q == NULL;
317 }
318 #define waitq_wait_possible(thread)   waitq_is_null((thread)->waitq)
319 
320 static inline bool
waitq_preposts(waitq_t wq)321 waitq_preposts(waitq_t wq)
322 {
323 	switch (waitq_type(wq)) {
324 	case WQT_PORT:
325 	case WQT_SELECT:
326 		return true;
327 	default:
328 		return false;
329 	}
330 }
331 
332 static inline bool
waitq_irq_safe(waitq_t waitq)333 waitq_irq_safe(waitq_t waitq)
334 {
335 	switch (waitq_type(waitq)) {
336 	case WQT_QUEUE:
337 	case WQT_TURNSTILE:
338 		return true;
339 	default:
340 		return false;
341 	}
342 }
343 
344 static inline bool
waitq_valid(waitq_t waitq)345 waitq_valid(waitq_t waitq)
346 {
347 	return waitq.wq_q && waitq.wq_q->waitq_interlock.lck_valid;
348 }
349 
350 /*
351  * global waitqs
352  */
353 extern struct waitq *_global_eventq(char *event, size_t event_length);
354 #define global_eventq(event) _global_eventq((char *)&(event), sizeof(event))
355 
356 #endif  /* MACH_KERNEL_PRIVATE */
357 #pragma mark locking
358 
359 /*!
360  * @function waitq_lock()
361  *
362  * @brief
363  * Lock a wait queue or wait queue set.
364  *
365  * @discussion
366  * It is the responsibility of the caller to disable
367  * interrupts if the queue is IRQ safe.
368  */
369 extern void waitq_lock(waitq_t wq);
370 
371 /*!
372  * @function waitq_unlock()
373  *
374  * @brief
375  * Unlock a wait queue or wait queue set.
376  *
377  * @discussion
378  * It is the responsibility of the caller to reenable
379  * interrupts if the queue is IRQ safe.
380  */
381 extern void waitq_unlock(waitq_t wq);
382 
383 /**
384  * @function waitq_is_valid()
385  *
386  * @brief
387  * Returns whether a wait queue or wait queue set has been invalidated.
388  */
389 extern bool waitq_is_valid(waitq_t wq);
390 
391 #ifdef MACH_KERNEL_PRIVATE
392 
393 /**
394  * @function waitq_invalidate()
395  *
396  * @brief
397  * Invalidate a waitq.
398  *
399  * @discussion
400  * It is the responsibility of the caller to make sure that:
401  * - all waiters are woken up
402  * - linkages and preposts are cleared (non IRQ Safe waitqs).
403  */
404 extern void waitq_invalidate(waitq_t wq);
405 
406 /*!
407  * @function waitq_held()
408  *
409  * @brief
410  * Returns whether someone is holding the lock of the specified wait queue.
411  */
412 extern bool waitq_held(waitq_t wq) __result_use_check;
413 
414 /*!
415  * @function waitq_lock_allow_invalid()
416  *
417  * @brief
418  * Lock the specified wait queue if it is valid.
419  *
420  * @discussion
421  * This function allows for the backing memory of the specified wait queue
422  * to be unmapped.
423  *
424  * Combining this with the zone allocator @c ZC_SEQUESTER feature
425  * (along with @c ZC_ZFREE_CLEARMEM and @c ZC_KASAN_NOQUARANTINE)
426  * allows to create clever schemes (See @c ipc_right_lookup_read()).
427  */
428 extern bool waitq_lock_allow_invalid(waitq_t wq) __result_use_check;
429 
430 /*!
431  * @function waitq_lock_reserve()
432  *
433  * @brief
434  * Reserves the lock of the specified wait queue.
435  *
436  * @discussion
437  * Wait queue locks are "ordered" and a reservation in the lock queue
438  * can be acquired. This can be used to resolve certain lock inversions
439  * without risks for the memory backing the wait queue to disappear.
440  *
441  * See <kern/ticket_lock.h> for details.
442  *
443  * @param wq            the specified wait queue
444  * @param ticket        a pointer to memory to hold the reservation
445  * @returns
446  *     - true if the lock was acquired
447  *     - false otherwise, and @c waitq_lock_wait() @em must be called
448  *       to wait for this ticket.
449  */
450 extern bool waitq_lock_reserve(waitq_t wq, uint32_t *ticket) __result_use_check;
451 
452 /*!
453  * @function waitq_lock_wait()
454  *
455  * @brief
456  * Wait for a ticket acquired with @c waitq_lock_reserve().
457  */
458 extern void waitq_lock_wait(waitq_t wq, uint32_t ticket);
459 
460 /*!
461  * @function waitq_lock_try()
462  *
463  * @brief
464  * Attempts to acquire the lock of the specified wait queue.
465  *
466  * @discussion
467  * Using @c waitq_lock_try() is discouraged as it leads to inefficient
468  * algorithms prone to contention.
469  *
470  * Schemes based on @c waitq_lock_reserve() / @c waitq_lock_wait() is preferred.
471  *
472  */
473 extern bool waitq_lock_try(waitq_t wq) __result_use_check;
474 
475 #endif /* MACH_KERNEL_PRIVATE */
476 #pragma mark assert_wait / wakeup
477 
478 /**
479  * @function waitq_assert_wait64()
480  *
481  * @brief
482  * Declare a thread's intent to wait on @c waitq for @c wait_event.
483  *
484  * @discussion
485  * @c waitq must be unlocked
486  */
487 extern wait_result_t waitq_assert_wait64(
488 	waitq_t                 waitq,
489 	event64_t               wait_event,
490 	wait_interrupt_t        interruptible,
491 	uint64_t                deadline);
492 
493 /**
494  * @function waitq_assert_wait64_leeway()
495  *
496  * @brief
497  * Declare a thread's intent to wait on @c waitq for @c wait_event.
498  *
499  * @discussion
500  * @c waitq must be unlocked
501  */
502 extern wait_result_t waitq_assert_wait64_leeway(
503 	waitq_t                 waitq,
504 	event64_t               wait_event,
505 	wait_interrupt_t        interruptible,
506 	wait_timeout_urgency_t  urgency,
507 	uint64_t                deadline,
508 	uint64_t                leeway);
509 
510 /**
511  * @function waitq_wakeup64_one()
512  *
513  * @brief
514  * Wakeup a single thread from a waitq that's waiting for a given event.
515  *
516  * @discussion
517  * @c waitq must be unlocked
518  */
519 extern kern_return_t waitq_wakeup64_one(
520 	waitq_t                 waitq,
521 	event64_t               wake_event,
522 	wait_result_t           result,
523 	int                     priority);
524 
525 /**
 * @function waitq_wakeup64_all()
527  *
528  * @brief
529  * Wakeup all threads from a waitq that are waiting for a given event.
530  *
531  * @c waitq must be unlocked
532  */
533 extern kern_return_t waitq_wakeup64_all(
534 	waitq_t                 waitq,
535 	event64_t               wake_event,
536 	wait_result_t           result,
537 	int                     priority);
538 
539 /**
540  * @function waitq_wakeup64_identify()
541  *
542  * @brief
543  * Wakeup one thread waiting on 'waitq' for 'wake_event'
544  *
545  * @discussion
546  * @c waitq must be unlocked.
547  *
548  * May temporarily disable and re-enable interrupts
549  *
550  * @returns
551  *     - THREAD_NULL if no thread was waiting
552  *     - a reference to a thread that was waiting on @c waitq.
553  */
554 extern thread_t waitq_wakeup64_identify(
555 	waitq_t                 waitq,
556 	event64_t               wake_event,
557 	wait_result_t           result,
558 	int                     priority);
559 
560 /**
561  * @function waitq_wakeup64_thread()
562  *
563  * @brief
564  * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
565  *
566  * @discussion
567  * @c waitq must be unlocked and must be IRQ safe.
568  * @c thread must be unlocked
569  *
570  * May temporarily disable and re-enable interrupts
571  */
572 extern kern_return_t waitq_wakeup64_thread(
573 	struct waitq           *waitq,
574 	event64_t               wake_event,
575 	thread_t                thread,
576 	wait_result_t           result);
577 
578 #pragma mark Mach-only assert_wait / wakeup
579 #ifdef MACH_KERNEL_PRIVATE
580 
581 typedef enum e_waitq_lock_state {
582 	WAITQ_KEEP_LOCKED    = 0x01,
583 	WAITQ_UNLOCK         = 0x02,
584 } waitq_lock_state_t;
585 
586 __options_decl(waitq_options_t, uint32_t, {
587 	WQ_OPTION_NONE                 = 0,
588 	WQ_OPTION_HANDOFF              = 1,
589 });
590 
591 /**
592  * @function waitq_clear_promotion_locked()
593  *
594  * @brief
595  * Clear a potential thread priority promotion from a waitq wakeup
596  * with @c WAITQ_PROMOTE_PRIORITY.
597  *
598  * @discussion
599  * @c waitq must be locked.
600  *
601  * This must be called on the thread which was woken up
602  * with @c TH_SFLAG_WAITQ_PROMOTED.
603  */
604 extern void waitq_clear_promotion_locked(
605 	waitq_t                 waitq,
606 	thread_t                thread);
607 
608 /**
609  * @function waitq_pull_thread_locked()
610  *
611  * @brief
612  * Remove @c thread from its current blocking state on @c waitq.
613  *
614  * @discussion
615  * This function is only used by clear_wait_internal in sched_prim.c
616  * (which itself is called by the timer wakeup path and clear_wait()).
617  *
 * @c thread must be locked (the function might drop and reacquire the lock).
619  *
620  * @returns
 *     - true if the thread has been pulled successfully.
622  *     - false otherwise, if the thread was no longer waiting on this waitq.
623  */
624 extern bool waitq_pull_thread_locked(
625 	waitq_t                 waitq,
626 	thread_t                thread);
627 
628 /**
629  * @function waitq_assert_wait64_locked()
630  *
631  * @brief
632  * Declare a thread's intent to wait on @c waitq for @c wait_event.
633  *
634  * @discussion
635  * @c waitq must be locked.
636  *
637  * Note that @c waitq might be unlocked and relocked during this call
638  * if it is a waitq set.
639  */
640 extern wait_result_t waitq_assert_wait64_locked(
641 	waitq_t                 waitq,
642 	event64_t               wait_event,
643 	wait_interrupt_t        interruptible,
644 	wait_timeout_urgency_t  urgency,
645 	uint64_t                deadline,
646 	uint64_t                leeway,
647 	thread_t                thread);
648 
649 /**
650  * @function waitq_wakeup64_all_locked()
651  *
652  * @brief
653  * Wakeup all threads waiting on @c waitq for @c wake_event
654  *
655  * @discussion
656  * @c waitq must be locked.
657  *
658  * May temporarily disable and re-enable interrupts
659  * and re-adjust thread priority of each awoken thread.
660  *
661  * If the input @c lock_state is @c WAITQ_UNLOCK then @c waitq will have
662  * been unlocked before calling @c thread_go() on any returned threads,
663  * and is guaranteed to be unlocked upon function return.
664  */
665 extern kern_return_t waitq_wakeup64_all_locked(
666 	waitq_t                 waitq,
667 	event64_t               wake_event,
668 	wait_result_t           result,
669 	int                     priority,
670 	waitq_lock_state_t      lock_state);
671 
672 /**
673  * @function waitq_wakeup64_one_locked()
674  *
675  * @brief
676  * Wakeup one thread waiting on @c waitq for @c wake_event.
677  *
678  * @discussion
679  * @c waitq must be locked.
680  *
681  * May temporarily disable and re-enable interrupts.
682  */
683 extern kern_return_t waitq_wakeup64_one_locked(
684 	waitq_t                 waitq,
685 	event64_t               wake_event,
686 	wait_result_t           result,
687 	int                     priority,
688 	waitq_lock_state_t      lock_state,
689 	waitq_options_t         options);
690 
691 /**
692  * @function waitq_wakeup64_identify_locked()
693  *
694  * @brief
695  * Wakeup one thread waiting on 'waitq' for 'wake_event'
696  *
697  * @returns
698  *     A locked, runnable thread.  If return value is non-NULL,
699  *     interrupts have also been disabled, and the caller
700  *     must call @c splx(*spl).
701  */
702 extern thread_t waitq_wakeup64_identify_locked(
703 	waitq_t                 waitq,
704 	event64_t               wake_event,
705 	wait_result_t           result,
706 	spl_t                  *spl,
707 	int                     priority,
708 	waitq_lock_state_t      lock_state);
709 
710 /**
711  * @function waitq_wakeup64_thread_and_unlock()
712  *
713  * @brief
714  * Wakeup a specific thread iff it's waiting on @c waitq for @c wake_event.
715  *
716  * @discussion
717  * @c waitq must IRQ safe and locked, unlocked on return.
718  * @c thread must be unlocked
719  */
720 extern kern_return_t waitq_wakeup64_thread_and_unlock(
721 	struct waitq           *waitq,
722 	event64_t               wake_event,
723 	thread_t                thread,
724 	wait_result_t           result);
725 
726 #endif /* MACH_KERNEL_PRIVATE */
727 #pragma mark waitq links
728 
729 /*!
730  * @function waitq_link_alloc()
731  *
732  * @brief
733  * Allocates a linkage object to be used with a wait queue of the specified type.
734  */
735 extern waitq_link_t waitq_link_alloc(
736 	waitq_type_t            type);
737 
738 /*!
739  * @function waitq_link_free()
740  *
741  * @brief
742  * Frees a linkage object that was used with a wait queue of the specified type.
743  */
744 extern void waitq_link_free(
745 	waitq_type_t            type,
746 	waitq_link_t            link);
747 
748 /*!
749  * @function waitq_link_free_list()
750  *
751  * @brief
752  * Frees a list of linkage object that was used with a wait queue
753  * of the specified type.
754  */
755 extern void waitq_link_free_list(
756 	waitq_type_t            type,
757 	waitq_link_list_t      *list);
758 
759 #pragma mark wait queues lifecycle
760 
761 /*!
762  * @function waitq_init()
763  *
764  * @brief
765  * Initializes a wait queue.
766  *
767  * @discussion
768  * @c type must be a valid type.
769  */
770 extern void waitq_init(
771 	waitq_t                 waitq,
772 	waitq_type_t            type,
773 	int                     policy);
774 
775 /*!
776  * @function waitq_deinit()
777  *
778  * @brief
779  * Destroys a wait queue.
780  *
781  * @discussion
782  * @c waitq can't be a select set.
783  */
784 extern void waitq_deinit(
785 	waitq_t                 waitq);
786 
787 #pragma mark port wait queues and port set waitq sets
788 #ifdef MACH_KERNEL_PRIVATE
789 
790 /**
791  * @function waitq_link_locked()
792  *
793  * @brief
794  * Link the specified port wait queue to a specified port set wait queue set.
795  *
796  * @discussion
797  * This function doesn't handle preposting/waking up the set
798  * when the wait queue is already preposted.
799  *
800  * @param waitq         the port wait queue to link, must be locked.
801  * @param wqset         the port set wait queue set to link, must be locked.
802  * @param link          a pointer to a link allocated with
803  *                      @c waitq_link_alloc(WQT_PORT_SET).
804  */
805 extern kern_return_t waitq_link_locked(
806 	struct waitq           *waitq,
807 	struct waitq_set       *wqset,
808 	waitq_link_t           *link);
809 
810 /**
811  * @function waitq_link_prepost_locked()
812  *
813  * @brief
814  * Force a given link to be preposted.
815  *
816  * @param waitq         the port wait queue to link, must be locked.
817  * @param wqset         the port set wait queue set to link, must be locked.
818  */
819 extern kern_return_t waitq_link_prepost_locked(
820 	struct waitq           *waitq,
821 	struct waitq_set       *wqset);
822 
823 /**
824  * @function
825  * Unlinks the specified port wait queue from a specified port set wait queue set.
826  *
827  * @param waitq         the port wait queue to unlink, must be locked.
828  * @param wqset         the port set wait queue set to link, must be locked.
829  * @returns
830  *     - @c WQL_NULL if the port wasn't a member of the set.
831  *     - a link to consume with @c waitq_link_free() otherwise.
832  */
833 extern waitq_link_t waitq_unlink_locked(
834 	struct waitq           *waitq,
835 	struct waitq_set       *wqset);
836 
837 /**
838  * @function waitq_unlink_all_locked()
839  *
840  * @brief
841  * Unlink the specified wait queue from all sets to which it belongs
842  *
843  * @param waitq         the port wait queue to link, must be locked.
844  * @param except_wqset  do not unlink this wqset.
845  * @param free_l        a waitq link list to which links to free will be added.
846  *                      the caller must call @c waitq_link_free_list() on it.
847  */
848 extern void waitq_unlink_all_locked(
849 	struct waitq           *waitq,
850 	struct waitq_set       *except_wqset,
851 	waitq_link_list_t      *free_l);
852 
853 /**
854  * @function waitq_set_unlink_all_locked()
855  *
856  * @brief
857  * Unlink all wait queues from this set.
858  *
859  * @discussion
860  * The @c wqset lock might be dropped and reacquired during this call.
861  *
862  * @param wqset         the port-set wait queue set to unlink, must be locked.
863  * @param free_l        a waitq link list to which links to free will be added.
864  *                      the caller must call @c waitq_link_free_list() on it.
865  */
866 extern void waitq_set_unlink_all_locked(
867 	struct waitq_set       *wqset,
868 	waitq_link_list_t      *free_l);
869 
870 /**
871  * @function waitq_set_foreach_member_locked()
872  *
873  * @brief
874  * Iterate all ports members of a port-set wait queue set.
875  *
876  * @param wqset         the port-set wait queue set to unlink.
877  * @param cb            a block called for each port wait queue in the set.
878  *                      those wait queues aren't locked (and can't safely
879  *                      be because @c wqset is locked the whole time
880  *                      and this would constitute a lock inversion).
881  */
882 extern void waitq_set_foreach_member_locked(
883 	struct waitq_set       *wqset,
884 	void                  (^cb)(struct waitq *));
885 
886 __options_decl(wqs_prepost_flags_t, uint32_t, {
887 	WQS_PREPOST_PEEK = 0x1,
888 	WQS_PREPOST_LOCK = 0x2,
889 });
890 
891 /**
892  * @function waitq_set_first_prepost()
893  *
894  * @brief
895  * Return the first preposted wait queue from the list of preposts of this set.
896  *
897  * @discussion
898  * The @c wqset lock might be dropped and reacquired during this call.
899  *
900  * @param wqset         the port-set wait queue set to unlink, must be locked.
901  * @param flags
902  *     - if @c WQS_PREPOST_LOCK is set, the returned wait queue is locked
903  *     - if @c WQS_PREPOST_PEEK is set, this function assumes that no event
904  *       will be dequeued and the prepost list order is unchanged,
905  *       else the returned wait queue is put at the end of the prepost list.
906  */
907 struct waitq *waitq_set_first_prepost(
908 	struct waitq_set       *wqset,
909 	wqs_prepost_flags_t    flags);
910 
911 /**
912  * @function waitq_clear_prepost_locked()
913  *
914  * @brief
915  * Clear all preposts originating from the specified wait queue.
916  *
917  * @discussion
918  * @c waitq must be locked.
919  *
920  * This function only lazily marks the waitq as no longer preposting,
921  * and doesn't clear the preposts for two reasons:
922  * - it avoids some lock contention by not acquiring the set locks,
923  * - it allows for ports that keep receiving messages to keep their slot
924  *   in the prepost queue of sets, which improves fairness.
925  *
926  * Sets it is a member of will discover this when a thread
927  * tries to receive through it.
928  */
929 extern void waitq_clear_prepost_locked(
930 	struct waitq           *waitq);
931 
932 /**
933  * @function ipc_pset_prepost()
934  *
935  * @brief
936  * Upcall from the waitq code to prepost to the kevent subsystem.
937  *
938  * @discussion
939  * Called with the pset and waitq locks held.
940  * (in ipc_pset.c).
941  */
942 extern void ipc_pset_prepost(
943 	struct waitq_set       *wqset,
944 	struct waitq           *waitq);
945 
946 #endif /* MACH_KERNEL_PRIVATE */
947 #pragma mark select wait queues and select port set waitq sets
948 
949 extern struct waitq select_conflict_queue;
950 
951 /*!
952  * @function select_set_alloc()
953  *
954  * @brief
955  * Allocates a select wait queue set.
956  *
957  * @discussion
958  * select sets assume that they are only manipulated
959  * from the context of the thread they belong to.
960  */
961 extern struct select_set *select_set_alloc(void);
962 
963 /*!
964  * @function select_set_free()
965  *
966  * @brief
967  * Frees a select set allocated with @c select_set_alloc().
968  */
969 extern void select_set_free(
970 	struct select_set      *selset);
971 
972 /*!
973  * @function select_set_link()
974  *
975  * @brief
976  * Links a select wait queue into a select wait queue set.
977  *
978  * @param waitq       a wait queue of type @c WQT_SELECT.
979  * @param selset      a select set
980  * @param linkp       a pointer to a linkage allocated
981  *                    with @c waitq_link_alloc(WQT_SELECT_SET),
982  *                    which gets niled out if the linkage is used.
983  */
984 extern void select_set_link(
985 	struct waitq           *waitq,
986 	struct select_set      *selset,
987 	waitq_link_t           *linkp);
988 
989 /*!
990  * @function select_set_reset()
991  *
992  * @brief
993  * Resets a select set to prepare it for reuse.
994  *
995  * @discussion
996  * This operation is lazy and will not unlink select wait queues
997  * from the select set.
998  */
999 extern void select_set_reset(
1000 	struct select_set      *selset);
1001 
1002 /*!
1003  * @function select_waitq_wakeup_and_deinit()
1004  *
1005  * @brief
1006  * Combined wakeup, unlink, and deinit under a single lock hold for select().
1007  *
1008  * @discussion
1009  * @c waitq must be a @c WQT_SELECT queue.
1010  */
1011 extern void select_waitq_wakeup_and_deinit(
1012 	struct waitq           *waitq,
1013 	event64_t               wake_event,
1014 	wait_result_t           result,
1015 	int                     priority);
1016 
1017 #endif /* XNU_KERNEL_PRIVATE */
1018 
1019 #pragma GCC visibility pop
1020 
1021 __ASSUME_PTR_ABI_SINGLE_END __END_DECLS
1022 
1023 #endif  /* KERNEL_PRIVATE */
1024 #endif  /* _WAITQ_H_ */
1025