xref: /xnu-8020.121.3/bsd/pthread/pthread_workqueue.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1*fdd8201dSApple OSS Distributions /*
2*fdd8201dSApple OSS Distributions  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3*fdd8201dSApple OSS Distributions  *
4*fdd8201dSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*fdd8201dSApple OSS Distributions  *
6*fdd8201dSApple OSS Distributions  * This file contains Original Code and/or Modifications of Original Code
7*fdd8201dSApple OSS Distributions  * as defined in and that are subject to the Apple Public Source License
8*fdd8201dSApple OSS Distributions  * Version 2.0 (the 'License'). You may not use this file except in
9*fdd8201dSApple OSS Distributions  * compliance with the License. The rights granted to you under the License
10*fdd8201dSApple OSS Distributions  * may not be used to create, or enable the creation or redistribution of,
11*fdd8201dSApple OSS Distributions  * unlawful or unlicensed copies of an Apple operating system, or to
12*fdd8201dSApple OSS Distributions  * circumvent, violate, or enable the circumvention or violation of, any
13*fdd8201dSApple OSS Distributions  * terms of an Apple operating system software license agreement.
14*fdd8201dSApple OSS Distributions  *
15*fdd8201dSApple OSS Distributions  * Please obtain a copy of the License at
16*fdd8201dSApple OSS Distributions  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*fdd8201dSApple OSS Distributions  *
18*fdd8201dSApple OSS Distributions  * The Original Code and all software distributed under the License are
19*fdd8201dSApple OSS Distributions  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*fdd8201dSApple OSS Distributions  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*fdd8201dSApple OSS Distributions  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*fdd8201dSApple OSS Distributions  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*fdd8201dSApple OSS Distributions  * Please see the License for the specific language governing rights and
24*fdd8201dSApple OSS Distributions  * limitations under the License.
25*fdd8201dSApple OSS Distributions  *
26*fdd8201dSApple OSS Distributions  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*fdd8201dSApple OSS Distributions  */
28*fdd8201dSApple OSS Distributions /* Copyright (c) 1995-2018 Apple, Inc. All Rights Reserved */
29*fdd8201dSApple OSS Distributions 
30*fdd8201dSApple OSS Distributions #include <sys/cdefs.h>
31*fdd8201dSApple OSS Distributions 
32*fdd8201dSApple OSS Distributions #include <kern/assert.h>
33*fdd8201dSApple OSS Distributions #include <kern/ast.h>
34*fdd8201dSApple OSS Distributions #include <kern/clock.h>
35*fdd8201dSApple OSS Distributions #include <kern/cpu_data.h>
36*fdd8201dSApple OSS Distributions #include <kern/kern_types.h>
37*fdd8201dSApple OSS Distributions #include <kern/policy_internal.h>
38*fdd8201dSApple OSS Distributions #include <kern/processor.h>
39*fdd8201dSApple OSS Distributions #include <kern/sched_prim.h>    /* for thread_exception_return */
40*fdd8201dSApple OSS Distributions #include <kern/task.h>
41*fdd8201dSApple OSS Distributions #include <kern/thread.h>
42*fdd8201dSApple OSS Distributions #include <kern/thread_group.h>
43*fdd8201dSApple OSS Distributions #include <kern/zalloc.h>
44*fdd8201dSApple OSS Distributions #include <mach/kern_return.h>
45*fdd8201dSApple OSS Distributions #include <mach/mach_param.h>
46*fdd8201dSApple OSS Distributions #include <mach/mach_port.h>
47*fdd8201dSApple OSS Distributions #include <mach/mach_types.h>
48*fdd8201dSApple OSS Distributions #include <mach/mach_vm.h>
49*fdd8201dSApple OSS Distributions #include <mach/sync_policy.h>
50*fdd8201dSApple OSS Distributions #include <mach/task.h>
51*fdd8201dSApple OSS Distributions #include <mach/thread_act.h> /* for thread_resume */
52*fdd8201dSApple OSS Distributions #include <mach/thread_policy.h>
53*fdd8201dSApple OSS Distributions #include <mach/thread_status.h>
54*fdd8201dSApple OSS Distributions #include <mach/vm_prot.h>
55*fdd8201dSApple OSS Distributions #include <mach/vm_statistics.h>
56*fdd8201dSApple OSS Distributions #include <machine/atomic.h>
57*fdd8201dSApple OSS Distributions #include <machine/machine_routines.h>
58*fdd8201dSApple OSS Distributions #include <machine/smp.h>
59*fdd8201dSApple OSS Distributions #include <vm/vm_map.h>
60*fdd8201dSApple OSS Distributions #include <vm/vm_protos.h>
61*fdd8201dSApple OSS Distributions 
62*fdd8201dSApple OSS Distributions #include <sys/eventvar.h>
63*fdd8201dSApple OSS Distributions #include <sys/kdebug.h>
64*fdd8201dSApple OSS Distributions #include <sys/kernel.h>
65*fdd8201dSApple OSS Distributions #include <sys/lock.h>
66*fdd8201dSApple OSS Distributions #include <sys/param.h>
67*fdd8201dSApple OSS Distributions #include <sys/proc_info.h>      /* for fill_procworkqueue */
68*fdd8201dSApple OSS Distributions #include <sys/proc_internal.h>
69*fdd8201dSApple OSS Distributions #include <sys/pthread_shims.h>
70*fdd8201dSApple OSS Distributions #include <sys/resourcevar.h>
71*fdd8201dSApple OSS Distributions #include <sys/signalvar.h>
72*fdd8201dSApple OSS Distributions #include <sys/sysctl.h>
73*fdd8201dSApple OSS Distributions #include <sys/sysproto.h>
74*fdd8201dSApple OSS Distributions #include <sys/systm.h>
75*fdd8201dSApple OSS Distributions #include <sys/ulock.h> /* for ulock_owner_value_to_port_name */
76*fdd8201dSApple OSS Distributions 
77*fdd8201dSApple OSS Distributions #include <pthread/bsdthread_private.h>
78*fdd8201dSApple OSS Distributions #include <pthread/workqueue_syscalls.h>
79*fdd8201dSApple OSS Distributions #include <pthread/workqueue_internal.h>
80*fdd8201dSApple OSS Distributions #include <pthread/workqueue_trace.h>
81*fdd8201dSApple OSS Distributions 
82*fdd8201dSApple OSS Distributions #include <os/log.h>
83*fdd8201dSApple OSS Distributions 
/*
 * Forward declarations for helpers defined later in this file.
 */

/* Continuation a parked workqueue thread resumes on; never returns. */
static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2;
/* Evaluate whether a creator thread should be scheduled for this workqueue. */
static void workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags);

/* Can `uth` service `req` right now, per pool admission rules? */
static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req);

/* How many more constrained threads may run at `at_qos`. */
static uint32_t workq_constrained_allowance(struct workqueue *wq,
    thread_qos_t at_qos, struct uthread *uth, bool may_start_timer);

/* Recompute the best queued QoS for the cooperative pool. */
static bool _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq);

/* Was the thread that last blocked in this bucket blocked "recently"? */
static bool workq_thread_is_busy(uint64_t cur_ts,
    _Atomic uint64_t *lastblocked_tsp);

static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS;

static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags);

static inline void
workq_lock_spin(struct workqueue *wq);

static inline void
workq_unlock(struct workqueue *wq);
109*fdd8201dSApple OSS Distributions 
110*fdd8201dSApple OSS Distributions #pragma mark globals
111*fdd8201dSApple OSS Distributions 
/*
 * A tunable expressed in microseconds, with a cached mach absolute-time
 * equivalent kept in sync by workq_sysctl_handle_usecs().
 */
struct workq_usec_var {
	uint32_t usecs;         /* user-visible value, set via sysctl */
	uint64_t abstime;       /* same interval in mach absolute-time units */
};

/*
 * Declare a workq_usec_var plus its kern.<var>_usecs sysctl, routed through
 * workq_sysctl_handle_usecs so abstime is recomputed on every write.
 */
#define WORKQ_SYSCTL_USECS(var, init) \
	        static struct workq_usec_var var = { .usecs = init }; \
	        SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
	                        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
	                        workq_sysctl_handle_usecs, "I", "")
122*fdd8201dSApple OSS Distributions 
static LCK_GRP_DECLARE(workq_lck_grp, "workq");
os_refgrp_decl(static, workq_refgrp, "workq", NULL);

/* Zones for the per-process workqueue and for thread requests. */
static ZONE_DEFINE(workq_zone_workqueue, "workq.wq",
    sizeof(struct workqueue), ZC_NONE);
static ZONE_DEFINE(workq_zone_threadreq, "workq.threadreq",
    sizeof(struct workq_threadreq_s), ZC_CACHING);

/* Daemon queue on which workqueue structures are deallocated asynchronously. */
static struct mpsc_daemon_queue workq_deallocate_queue;

WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS);
/* Pool sizing tunables (see the sysctls below). */
static uint32_t wq_max_threads              = WORKQUEUE_MAXTHREADS;
static uint32_t wq_max_constrained_threads  = WORKQUEUE_MAXTHREADS / 8;
static uint32_t wq_init_constrained_limit   = 1;
static uint16_t wq_death_max_load;
static uint32_t wq_max_parallelism[WORKQ_NUM_QOS_BUCKETS];

/*
 * This is not a hard limit but the max size we want to aim to hit across the
 * entire cooperative pool. We can oversubscribe the pool due to non-cooperative
 * workers and the max we will oversubscribe the pool by, is a total of
 * wq_max_cooperative_threads * WORKQ_NUM_QOS_BUCKETS.
 */
static uint32_t wq_max_cooperative_threads;
149*fdd8201dSApple OSS Distributions 
150*fdd8201dSApple OSS Distributions static inline uint32_t
wq_cooperative_queue_max_size(struct workqueue * wq)151*fdd8201dSApple OSS Distributions wq_cooperative_queue_max_size(struct workqueue *wq)
152*fdd8201dSApple OSS Distributions {
153*fdd8201dSApple OSS Distributions 	return wq->wq_cooperative_queue_has_limited_max_size ? 1 : wq_max_cooperative_threads;
154*fdd8201dSApple OSS Distributions }
155*fdd8201dSApple OSS Distributions 
156*fdd8201dSApple OSS Distributions #pragma mark sysctls
157*fdd8201dSApple OSS Distributions 
158*fdd8201dSApple OSS Distributions static int
159*fdd8201dSApple OSS Distributions workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS
160*fdd8201dSApple OSS Distributions {
161*fdd8201dSApple OSS Distributions #pragma unused(arg2)
162*fdd8201dSApple OSS Distributions 	struct workq_usec_var *v = arg1;
163*fdd8201dSApple OSS Distributions 	int error = sysctl_handle_int(oidp, &v->usecs, 0, req);
164*fdd8201dSApple OSS Distributions 	if (error || !req->newptr) {
165*fdd8201dSApple OSS Distributions 		return error;
166*fdd8201dSApple OSS Distributions 	}
167*fdd8201dSApple OSS Distributions 	clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC,
168*fdd8201dSApple OSS Distributions 	    &v->abstime);
169*fdd8201dSApple OSS Distributions 	return 0;
170*fdd8201dSApple OSS Distributions }
171*fdd8201dSApple OSS Distributions 
172*fdd8201dSApple OSS Distributions SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
173*fdd8201dSApple OSS Distributions     &wq_max_threads, 0, "");
174*fdd8201dSApple OSS Distributions 
175*fdd8201dSApple OSS Distributions SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
176*fdd8201dSApple OSS Distributions     &wq_max_constrained_threads, 0, "");
177*fdd8201dSApple OSS Distributions 
178*fdd8201dSApple OSS Distributions static int
179*fdd8201dSApple OSS Distributions wq_limit_cooperative_threads_for_proc SYSCTL_HANDLER_ARGS
180*fdd8201dSApple OSS Distributions {
181*fdd8201dSApple OSS Distributions #pragma unused(arg1, arg2, oidp)
182*fdd8201dSApple OSS Distributions 	int input_pool_size = 0;
183*fdd8201dSApple OSS Distributions 	int changed;
184*fdd8201dSApple OSS Distributions 	int error = 0;
185*fdd8201dSApple OSS Distributions 
186*fdd8201dSApple OSS Distributions 	error = sysctl_io_number(req, 0, sizeof(int), &input_pool_size, &changed);
187*fdd8201dSApple OSS Distributions 	if (error || !changed) {
188*fdd8201dSApple OSS Distributions 		return error;
189*fdd8201dSApple OSS Distributions 	}
190*fdd8201dSApple OSS Distributions 
191*fdd8201dSApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_DEFAULT 0
192*fdd8201dSApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS -1
193*fdd8201dSApple OSS Distributions /* Not available currently, but sysctl interface is designed to allow these
194*fdd8201dSApple OSS Distributions  * extra parameters:
195*fdd8201dSApple OSS Distributions  *		WQ_COOPERATIVE_POOL_SIZE_STRICT : -2 (across all bucket)
196*fdd8201dSApple OSS Distributions  *		WQ_COOPERATIVE_POOL_SIZE_CUSTOM : [1, 512]
197*fdd8201dSApple OSS Distributions  */
198*fdd8201dSApple OSS Distributions 
199*fdd8201dSApple OSS Distributions 	if (input_pool_size != WQ_COOPERATIVE_POOL_SIZE_DEFAULT
200*fdd8201dSApple OSS Distributions 	    && input_pool_size != WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS) {
201*fdd8201dSApple OSS Distributions 		error = EINVAL;
202*fdd8201dSApple OSS Distributions 		goto out;
203*fdd8201dSApple OSS Distributions 	}
204*fdd8201dSApple OSS Distributions 
205*fdd8201dSApple OSS Distributions 	proc_t p = req->p;
206*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
207*fdd8201dSApple OSS Distributions 
208*fdd8201dSApple OSS Distributions 	if (wq != NULL) {
209*fdd8201dSApple OSS Distributions 		workq_lock_spin(wq);
210*fdd8201dSApple OSS Distributions 		if (wq->wq_reqcount > 0 || wq->wq_nthreads > 0) {
211*fdd8201dSApple OSS Distributions 			// Hackily enforce that the workqueue is still new (no requests or
212*fdd8201dSApple OSS Distributions 			// threads)
213*fdd8201dSApple OSS Distributions 			error = ENOTSUP;
214*fdd8201dSApple OSS Distributions 		} else {
215*fdd8201dSApple OSS Distributions 			wq->wq_cooperative_queue_has_limited_max_size = (input_pool_size == WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS);
216*fdd8201dSApple OSS Distributions 		}
217*fdd8201dSApple OSS Distributions 		workq_unlock(wq);
218*fdd8201dSApple OSS Distributions 	} else {
219*fdd8201dSApple OSS Distributions 		/* This process has no workqueue, calling this syctl makes no sense */
220*fdd8201dSApple OSS Distributions 		return ENOTSUP;
221*fdd8201dSApple OSS Distributions 	}
222*fdd8201dSApple OSS Distributions 
223*fdd8201dSApple OSS Distributions out:
224*fdd8201dSApple OSS Distributions 	return error;
225*fdd8201dSApple OSS Distributions }
226*fdd8201dSApple OSS Distributions 
227*fdd8201dSApple OSS Distributions SYSCTL_PROC(_kern, OID_AUTO, wq_limit_cooperative_threads,
228*fdd8201dSApple OSS Distributions     CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_WR | CTLFLAG_LOCKED | CTLTYPE_INT, 0, 0,
229*fdd8201dSApple OSS Distributions     wq_limit_cooperative_threads_for_proc,
230*fdd8201dSApple OSS Distributions     "I", "Modify the max pool size of the cooperative pool");
231*fdd8201dSApple OSS Distributions 
232*fdd8201dSApple OSS Distributions #pragma mark p_wqptr
233*fdd8201dSApple OSS Distributions 
234*fdd8201dSApple OSS Distributions #define WQPTR_IS_INITING_VALUE ((struct workqueue *)~(uintptr_t)0)
235*fdd8201dSApple OSS Distributions 
236*fdd8201dSApple OSS Distributions static struct workqueue *
proc_get_wqptr_fast(struct proc * p)237*fdd8201dSApple OSS Distributions proc_get_wqptr_fast(struct proc *p)
238*fdd8201dSApple OSS Distributions {
239*fdd8201dSApple OSS Distributions 	return os_atomic_load(&p->p_wqptr, relaxed);
240*fdd8201dSApple OSS Distributions }
241*fdd8201dSApple OSS Distributions 
242*fdd8201dSApple OSS Distributions struct workqueue *
proc_get_wqptr(struct proc * p)243*fdd8201dSApple OSS Distributions proc_get_wqptr(struct proc *p)
244*fdd8201dSApple OSS Distributions {
245*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr_fast(p);
246*fdd8201dSApple OSS Distributions 	return wq == WQPTR_IS_INITING_VALUE ? NULL : wq;
247*fdd8201dSApple OSS Distributions }
248*fdd8201dSApple OSS Distributions 
249*fdd8201dSApple OSS Distributions static void
proc_set_wqptr(struct proc * p,struct workqueue * wq)250*fdd8201dSApple OSS Distributions proc_set_wqptr(struct proc *p, struct workqueue *wq)
251*fdd8201dSApple OSS Distributions {
252*fdd8201dSApple OSS Distributions 	wq = os_atomic_xchg(&p->p_wqptr, wq, release);
253*fdd8201dSApple OSS Distributions 	if (wq == WQPTR_IS_INITING_VALUE) {
254*fdd8201dSApple OSS Distributions 		proc_lock(p);
255*fdd8201dSApple OSS Distributions 		thread_wakeup(&p->p_wqptr);
256*fdd8201dSApple OSS Distributions 		proc_unlock(p);
257*fdd8201dSApple OSS Distributions 	}
258*fdd8201dSApple OSS Distributions }
259*fdd8201dSApple OSS Distributions 
/*
 * Claim the right to initialize this process's workqueue, or wait for a
 * concurrent initializer to finish.
 *
 * Returns true when the caller won the race and published the
 * WQPTR_IS_INITING_VALUE sentinel (it must later call proc_set_wqptr()).
 * Returns false when a workqueue already exists or another thread is
 * initializing one; in the latter case this blocks until proc_set_wqptr()
 * wakes us, and the caller is expected to re-examine the state.
 */
static bool
proc_init_wqptr_or_wait(struct proc *p)
{
	struct workqueue *wq;

	/* proc lock serializes against the wakeup in proc_set_wqptr() */
	proc_lock(p);
	wq = os_atomic_load(&p->p_wqptr, relaxed);

	if (wq == NULL) {
		/* we won: mark initialization in progress before dropping the lock */
		os_atomic_store(&p->p_wqptr, WQPTR_IS_INITING_VALUE, relaxed);
		proc_unlock(p);
		return true;
	}

	if (wq == WQPTR_IS_INITING_VALUE) {
		/* someone else is initializing: sleep until they publish */
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		proc_unlock(p);
	}
	return false;
}
283*fdd8201dSApple OSS Distributions 
284*fdd8201dSApple OSS Distributions static inline event_t
workq_parked_wait_event(struct uthread * uth)285*fdd8201dSApple OSS Distributions workq_parked_wait_event(struct uthread *uth)
286*fdd8201dSApple OSS Distributions {
287*fdd8201dSApple OSS Distributions 	return (event_t)&uth->uu_workq_stackaddr;
288*fdd8201dSApple OSS Distributions }
289*fdd8201dSApple OSS Distributions 
290*fdd8201dSApple OSS Distributions static inline void
workq_thread_wakeup(struct uthread * uth)291*fdd8201dSApple OSS Distributions workq_thread_wakeup(struct uthread *uth)
292*fdd8201dSApple OSS Distributions {
293*fdd8201dSApple OSS Distributions 	thread_wakeup_thread(workq_parked_wait_event(uth), get_machthread(uth));
294*fdd8201dSApple OSS Distributions }
295*fdd8201dSApple OSS Distributions 
296*fdd8201dSApple OSS Distributions #pragma mark wq_thactive
297*fdd8201dSApple OSS Distributions 
/*
 * wq_thactive packs per-bucket active-thread counts plus the best pending
 * constrained-request QoS into a single atomically-updated word.
 */
#if defined(__LP64__)
// Layout is:
//   127 - 115 : 13 bits of zeroes
//   114 - 112 : best QoS among all pending constrained requests
//   111 -   0 : MGR, AUI, UI, IN, DF, UT, BG+MT buckets every 16 bits
#define WQ_THACTIVE_BUCKET_WIDTH 16
#define WQ_THACTIVE_QOS_SHIFT    (7 * WQ_THACTIVE_BUCKET_WIDTH)
#else
// Layout is:
//   63 - 61 : best QoS among all pending constrained requests
//   60      : Manager bucket (0 or 1)
//   59 -  0 : AUI, UI, IN, DF, UT, BG+MT buckets every 10 bits
#define WQ_THACTIVE_BUCKET_WIDTH 10
#define WQ_THACTIVE_QOS_SHIFT    (6 * WQ_THACTIVE_BUCKET_WIDTH + 1)
#endif
/* Mask/midpoint for a single bucket's counter field. */
#define WQ_THACTIVE_BUCKET_MASK  ((1U << WQ_THACTIVE_BUCKET_WIDTH) - 1)
#define WQ_THACTIVE_BUCKET_HALF  (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))

static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
    "Make sure we have space to encode a QoS");
318*fdd8201dSApple OSS Distributions 
319*fdd8201dSApple OSS Distributions static inline wq_thactive_t
_wq_thactive(struct workqueue * wq)320*fdd8201dSApple OSS Distributions _wq_thactive(struct workqueue *wq)
321*fdd8201dSApple OSS Distributions {
322*fdd8201dSApple OSS Distributions 	return os_atomic_load_wide(&wq->wq_thactive, relaxed);
323*fdd8201dSApple OSS Distributions }
324*fdd8201dSApple OSS Distributions 
325*fdd8201dSApple OSS Distributions static inline uint8_t
_wq_bucket(thread_qos_t qos)326*fdd8201dSApple OSS Distributions _wq_bucket(thread_qos_t qos)
327*fdd8201dSApple OSS Distributions {
328*fdd8201dSApple OSS Distributions 	// Map both BG and MT to the same bucket by over-shifting down and
329*fdd8201dSApple OSS Distributions 	// clamping MT and BG together.
330*fdd8201dSApple OSS Distributions 	switch (qos) {
331*fdd8201dSApple OSS Distributions 	case THREAD_QOS_MAINTENANCE:
332*fdd8201dSApple OSS Distributions 		return 0;
333*fdd8201dSApple OSS Distributions 	default:
334*fdd8201dSApple OSS Distributions 		return qos - 2;
335*fdd8201dSApple OSS Distributions 	}
336*fdd8201dSApple OSS Distributions }
337*fdd8201dSApple OSS Distributions 
/* Extract the best pending constrained-request QoS from a wq_thactive word. */
#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
	        ((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))

/*
 * Read the best constrained-request QoS without an atomic load.
 * NOTE: the plain (non-atomic) read below is intentional — do not "fix" it.
 */
static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
{
	// Avoid expensive atomic operations: the three bits we're loading are in
	// a single byte, and always updated under the workqueue lock
	wq_thactive_t v = *(wq_thactive_t *)&wq->wq_thactive;
	return WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(v);
}
349*fdd8201dSApple OSS Distributions 
/*
 * Recompute the best pending constrained-request QoS from the constrained
 * priority queue and fold any change into the QoS field of wq_thactive.
 * Must be called with the workqueue lock held (the QoS bits are only ever
 * updated under it, which is what makes the add-based update safe).
 */
static void
_wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq)
{
	thread_qos_t old_qos, new_qos;
	workq_threadreq_t req;

	/* highest-priority pending constrained request, if any */
	req = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);
	new_qos = req ? req->tr_qos : THREAD_QOS_UNSPECIFIED;
	old_qos = _wq_thactive_best_constrained_req_qos(wq);
	if (old_qos != new_qos) {
		/* shift the signed QoS delta into the QoS field's position */
		long delta = (long)new_qos - (long)old_qos;
		wq_thactive_t v = (wq_thactive_t)delta << WQ_THACTIVE_QOS_SHIFT;
		/*
		 * We can do an atomic add relative to the initial load because updates
		 * to this qos are always serialized under the workqueue lock.
		 */
		v = os_atomic_add(&wq->wq_thactive, v, relaxed);
#ifdef __LP64__
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v,
		    (uint64_t)(v >> 64), 0);
#else
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0);
#endif
	}
}
376*fdd8201dSApple OSS Distributions 
377*fdd8201dSApple OSS Distributions static inline wq_thactive_t
_wq_thactive_offset_for_qos(thread_qos_t qos)378*fdd8201dSApple OSS Distributions _wq_thactive_offset_for_qos(thread_qos_t qos)
379*fdd8201dSApple OSS Distributions {
380*fdd8201dSApple OSS Distributions 	uint8_t bucket = _wq_bucket(qos);
381*fdd8201dSApple OSS Distributions 	__builtin_assume(bucket < WORKQ_NUM_BUCKETS);
382*fdd8201dSApple OSS Distributions 	return (wq_thactive_t)1 << (bucket * WQ_THACTIVE_BUCKET_WIDTH);
383*fdd8201dSApple OSS Distributions }
384*fdd8201dSApple OSS Distributions 
385*fdd8201dSApple OSS Distributions static inline wq_thactive_t
_wq_thactive_inc(struct workqueue * wq,thread_qos_t qos)386*fdd8201dSApple OSS Distributions _wq_thactive_inc(struct workqueue *wq, thread_qos_t qos)
387*fdd8201dSApple OSS Distributions {
388*fdd8201dSApple OSS Distributions 	wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
389*fdd8201dSApple OSS Distributions 	return os_atomic_add_orig(&wq->wq_thactive, v, relaxed);
390*fdd8201dSApple OSS Distributions }
391*fdd8201dSApple OSS Distributions 
392*fdd8201dSApple OSS Distributions static inline wq_thactive_t
_wq_thactive_dec(struct workqueue * wq,thread_qos_t qos)393*fdd8201dSApple OSS Distributions _wq_thactive_dec(struct workqueue *wq, thread_qos_t qos)
394*fdd8201dSApple OSS Distributions {
395*fdd8201dSApple OSS Distributions 	wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
396*fdd8201dSApple OSS Distributions 	return os_atomic_sub_orig(&wq->wq_thactive, v, relaxed);
397*fdd8201dSApple OSS Distributions }
398*fdd8201dSApple OSS Distributions 
399*fdd8201dSApple OSS Distributions static inline void
_wq_thactive_move(struct workqueue * wq,thread_qos_t old_qos,thread_qos_t new_qos)400*fdd8201dSApple OSS Distributions _wq_thactive_move(struct workqueue *wq,
401*fdd8201dSApple OSS Distributions     thread_qos_t old_qos, thread_qos_t new_qos)
402*fdd8201dSApple OSS Distributions {
403*fdd8201dSApple OSS Distributions 	wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) -
404*fdd8201dSApple OSS Distributions 	    _wq_thactive_offset_for_qos(old_qos);
405*fdd8201dSApple OSS Distributions 	os_atomic_add(&wq->wq_thactive, v, relaxed);
406*fdd8201dSApple OSS Distributions 	wq->wq_thscheduled_count[_wq_bucket(old_qos)]--;
407*fdd8201dSApple OSS Distributions 	wq->wq_thscheduled_count[_wq_bucket(new_qos)]++;
408*fdd8201dSApple OSS Distributions }
409*fdd8201dSApple OSS Distributions 
/*
 * Sum the active-thread counts for every bucket at or above `qos` from the
 * wq_thactive snapshot `v`.
 *
 * If `busycount` is non-NULL, also count buckets whose scheduled count
 * exceeds the active count and whose most recently blocked thread blocked
 * "recently" (per workq_thread_is_busy) — an estimate of threads likely to
 * resume soon.  If `max_busycount` is non-NULL, it receives the number of
 * buckets inspected, i.e. the upper bound for *busycount.
 *
 * Returns the aggregated active count.
 */
static inline uint32_t
_wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
    thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
{
	uint32_t count = 0, active;
	uint64_t curtime;

	assert(WORKQ_THREAD_QOS_MIN <= qos && qos <= WORKQ_THREAD_QOS_MAX);

	if (busycount) {
		/* single timestamp for all busy checks in this walk */
		curtime = mach_absolute_time();
		*busycount = 0;
	}
	if (max_busycount) {
		*max_busycount = THREAD_QOS_LAST - qos;
	}

	/* shift the starting bucket's field down to the low bits, then walk up */
	uint8_t i = _wq_bucket(qos);
	v >>= i * WQ_THACTIVE_BUCKET_WIDTH;
	for (; i < WORKQ_NUM_QOS_BUCKETS; i++, v >>= WQ_THACTIVE_BUCKET_WIDTH) {
		active = v & WQ_THACTIVE_BUCKET_MASK;
		count += active;

		if (busycount && wq->wq_thscheduled_count[i] > active) {
			if (workq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i])) {
				/*
				 * We only consider the last blocked thread for a given bucket
				 * as busy because we don't want to take the list lock in each
				 * sched callback. However this is an approximation that could
				 * contribute to thread creation storms.
				 */
				(*busycount)++;
			}
		}
	}

	return count;
}
448*fdd8201dSApple OSS Distributions 
449*fdd8201dSApple OSS Distributions static inline void
_wq_cooperative_queue_scheduled_count_dec(struct workqueue * wq,thread_qos_t qos)450*fdd8201dSApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(struct workqueue *wq, thread_qos_t qos)
451*fdd8201dSApple OSS Distributions {
452*fdd8201dSApple OSS Distributions 	__assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]--;
453*fdd8201dSApple OSS Distributions 	assert(old_scheduled_count > 0);
454*fdd8201dSApple OSS Distributions }
455*fdd8201dSApple OSS Distributions 
456*fdd8201dSApple OSS Distributions static inline void
_wq_cooperative_queue_scheduled_count_inc(struct workqueue * wq,thread_qos_t qos)457*fdd8201dSApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(struct workqueue *wq, thread_qos_t qos)
458*fdd8201dSApple OSS Distributions {
459*fdd8201dSApple OSS Distributions 	__assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]++;
460*fdd8201dSApple OSS Distributions 	assert(old_scheduled_count < UINT8_MAX);
461*fdd8201dSApple OSS Distributions }
462*fdd8201dSApple OSS Distributions 
463*fdd8201dSApple OSS Distributions #pragma mark wq_flags
464*fdd8201dSApple OSS Distributions 
465*fdd8201dSApple OSS Distributions static inline uint32_t
_wq_flags(struct workqueue * wq)466*fdd8201dSApple OSS Distributions _wq_flags(struct workqueue *wq)
467*fdd8201dSApple OSS Distributions {
468*fdd8201dSApple OSS Distributions 	return os_atomic_load(&wq->wq_flags, relaxed);
469*fdd8201dSApple OSS Distributions }
470*fdd8201dSApple OSS Distributions 
471*fdd8201dSApple OSS Distributions static inline bool
_wq_exiting(struct workqueue * wq)472*fdd8201dSApple OSS Distributions _wq_exiting(struct workqueue *wq)
473*fdd8201dSApple OSS Distributions {
474*fdd8201dSApple OSS Distributions 	return _wq_flags(wq) & WQ_EXITING;
475*fdd8201dSApple OSS Distributions }
476*fdd8201dSApple OSS Distributions 
477*fdd8201dSApple OSS Distributions bool
workq_is_exiting(struct proc * p)478*fdd8201dSApple OSS Distributions workq_is_exiting(struct proc *p)
479*fdd8201dSApple OSS Distributions {
480*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
481*fdd8201dSApple OSS Distributions 	return !wq || _wq_exiting(wq);
482*fdd8201dSApple OSS Distributions }
483*fdd8201dSApple OSS Distributions 
484*fdd8201dSApple OSS Distributions 
485*fdd8201dSApple OSS Distributions #pragma mark workqueue lock
486*fdd8201dSApple OSS Distributions 
/*
 * Reports whether wq_lock is currently held, using the kdp (kernel
 * debugger) variant of the ticket-lock query so it is usable from
 * debugger/panic context.
 */
static bool
workq_lock_is_acquired_kdp(struct workqueue *wq)
{
	return kdp_lck_ticket_is_acquired(&wq->wq_lock);
}
492*fdd8201dSApple OSS Distributions 
/*
 * Acquires the workqueue lock (blocking until available).
 * NOTE(review): despite the "_spin" name this takes a ticket lock
 * (lck_ticket_lock) — the name likely predates the lock-type change.
 */
static inline void
workq_lock_spin(struct workqueue *wq)
{
	lck_ticket_lock(&wq->wq_lock, &workq_lck_grp);
}
498*fdd8201dSApple OSS Distributions 
/*
 * Asserts that the calling thread owns wq_lock. Despite the
 * predicate-like name this returns nothing; it is an assertion helper.
 */
static inline void
workq_lock_held(struct workqueue *wq)
{
	LCK_TICKET_ASSERT_OWNED(&wq->wq_lock);
}
504*fdd8201dSApple OSS Distributions 
/*
 * Attempts to acquire the workqueue lock without blocking.
 * Returns true when the lock was taken.
 */
static inline bool
workq_lock_try(struct workqueue *wq)
{
	return lck_ticket_lock_try(&wq->wq_lock, &workq_lck_grp);
}
510*fdd8201dSApple OSS Distributions 
/* Releases the workqueue lock taken via workq_lock_spin/workq_lock_try. */
static inline void
workq_unlock(struct workqueue *wq)
{
	lck_ticket_unlock(&wq->wq_lock);
}
516*fdd8201dSApple OSS Distributions 
517*fdd8201dSApple OSS Distributions #pragma mark idle thread lists
518*fdd8201dSApple OSS Distributions 
/*
 * Initial per-thread workq policy: both the requested QoS and the
 * accounting bucket start at `qos`; the remaining fields (max,
 * override) are zero-initialized by the compound literal.
 */
#define WORKQ_POLICY_INIT(qos) \
	        (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }
521*fdd8201dSApple OSS Distributions 
522*fdd8201dSApple OSS Distributions static inline thread_qos_t
workq_pri_bucket(struct uu_workq_policy req)523*fdd8201dSApple OSS Distributions workq_pri_bucket(struct uu_workq_policy req)
524*fdd8201dSApple OSS Distributions {
525*fdd8201dSApple OSS Distributions 	return MAX(MAX(req.qos_req, req.qos_max), req.qos_override);
526*fdd8201dSApple OSS Distributions }
527*fdd8201dSApple OSS Distributions 
528*fdd8201dSApple OSS Distributions static inline thread_qos_t
workq_pri_override(struct uu_workq_policy req)529*fdd8201dSApple OSS Distributions workq_pri_override(struct uu_workq_policy req)
530*fdd8201dSApple OSS Distributions {
531*fdd8201dSApple OSS Distributions 	return MAX(workq_pri_bucket(req), req.qos_bucket);
532*fdd8201dSApple OSS Distributions }
533*fdd8201dSApple OSS Distributions 
534*fdd8201dSApple OSS Distributions static inline bool
workq_thread_needs_params_change(workq_threadreq_t req,struct uthread * uth)535*fdd8201dSApple OSS Distributions workq_thread_needs_params_change(workq_threadreq_t req, struct uthread *uth)
536*fdd8201dSApple OSS Distributions {
537*fdd8201dSApple OSS Distributions 	workq_threadreq_param_t cur_trp, req_trp = { };
538*fdd8201dSApple OSS Distributions 
539*fdd8201dSApple OSS Distributions 	cur_trp.trp_value = uth->uu_save.uus_workq_park_data.workloop_params;
540*fdd8201dSApple OSS Distributions 	if (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
541*fdd8201dSApple OSS Distributions 		req_trp = kqueue_threadreq_workloop_param(req);
542*fdd8201dSApple OSS Distributions 	}
543*fdd8201dSApple OSS Distributions 
544*fdd8201dSApple OSS Distributions 	/*
545*fdd8201dSApple OSS Distributions 	 * CPU percent flags are handled separately to policy changes, so ignore
546*fdd8201dSApple OSS Distributions 	 * them for all of these checks.
547*fdd8201dSApple OSS Distributions 	 */
548*fdd8201dSApple OSS Distributions 	uint16_t cur_flags = (cur_trp.trp_flags & ~TRP_CPUPERCENT);
549*fdd8201dSApple OSS Distributions 	uint16_t req_flags = (req_trp.trp_flags & ~TRP_CPUPERCENT);
550*fdd8201dSApple OSS Distributions 
551*fdd8201dSApple OSS Distributions 	if (!req_flags && !cur_flags) {
552*fdd8201dSApple OSS Distributions 		return false;
553*fdd8201dSApple OSS Distributions 	}
554*fdd8201dSApple OSS Distributions 
555*fdd8201dSApple OSS Distributions 	if (req_flags != cur_flags) {
556*fdd8201dSApple OSS Distributions 		return true;
557*fdd8201dSApple OSS Distributions 	}
558*fdd8201dSApple OSS Distributions 
559*fdd8201dSApple OSS Distributions 	if ((req_flags & TRP_PRIORITY) && req_trp.trp_pri != cur_trp.trp_pri) {
560*fdd8201dSApple OSS Distributions 		return true;
561*fdd8201dSApple OSS Distributions 	}
562*fdd8201dSApple OSS Distributions 
563*fdd8201dSApple OSS Distributions 	if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
564*fdd8201dSApple OSS Distributions 		return true;
565*fdd8201dSApple OSS Distributions 	}
566*fdd8201dSApple OSS Distributions 
567*fdd8201dSApple OSS Distributions 	return false;
568*fdd8201dSApple OSS Distributions }
569*fdd8201dSApple OSS Distributions 
/*
 * Returns true if `uth` needs a scheduling update (workloop params,
 * QoS, or — when CONFIG_PREADOPT_TG — a preadoption thread group)
 * before it can service `req`.
 */
static inline bool
workq_thread_needs_priority_change(workq_threadreq_t req, struct uthread *uth)
{
	if (workq_thread_needs_params_change(req, uth)) {
		return true;
	}

	/* the thread's effective override QoS must match the request's QoS */
	if (req->tr_qos != workq_pri_override(uth->uu_workq_pri)) {
		return true;
	}

#if CONFIG_PREADOPT_TG
	thread_group_qos_t tg = kqr_preadopt_thread_group(req);
	if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
		/*
		 * Ideally, we'd add check here to see if thread's preadopt TG is same
		 * as the thread requests's thread group and short circuit if that is
		 * the case. But in the interest of keeping the code clean and not
		 * taking the thread lock here, we're going to skip this. We will
		 * eventually shortcircuit once we try to set the preadoption thread
		 * group on the thread.
		 */
		return true;
	}
#endif

	return false;
}
598*fdd8201dSApple OSS Distributions 
/*
 * Moves `uth` from its old QoS accounting bucket to the one implied by
 * `new_pri`, updates the thread's kernel workq override to match, and
 * redrives the creator when the change may have unblocked a pending
 * thread request. Callers hold the workq lock (see
 * workq_thread_set_max_qos).
 */
static void
workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth,
    struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
    bool force_run)
{
	thread_qos_t old_bucket = old_pri.qos_bucket;
	thread_qos_t new_bucket = workq_pri_bucket(new_pri);

	/* keep the per-bucket active-thread accounting in sync */
	if (old_bucket != new_bucket) {
		_wq_thactive_move(wq, old_bucket, new_bucket);
	}

	new_pri.qos_bucket = new_bucket;
	uth->uu_workq_pri = new_pri;

	/* only poke the scheduler if the effective override actually moved */
	if (workq_pri_override(old_pri) != new_bucket) {
		thread_set_workq_override(get_machthread(uth), new_bucket);
	}

	if (wq->wq_reqcount && (old_bucket > new_bucket || force_run)) {
		int flags = WORKQ_THREADREQ_CAN_CREATE_THREADS;
		if (old_bucket > new_bucket) {
			/*
			 * When lowering our bucket, we may unblock a thread request,
			 * but we can't drop our priority before we have evaluated
			 * whether this is the case, and if we ever drop the workqueue lock
			 * that would cause a priority inversion.
			 *
			 * We hence have to disallow thread creation in that case.
			 */
			flags = 0;
		}
		workq_schedule_creator(p, wq, flags);
	}
}
634*fdd8201dSApple OSS Distributions 
/*
 * Sets/resets the cpu percent limits on the current thread. We can't set
 * these limits from outside of the current thread, so this function needs
 * to be called while we're executing on the intended thread.
 */
static void
workq_thread_reset_cpupercent(workq_threadreq_t req, struct uthread *uth)
{
	/* cpu limits can only be applied to the thread we are running on */
	assert(uth == current_uthread());
	workq_threadreq_param_t trp = { };

	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	if (uth->uu_workq_flags & UT_WORKQ_CPUPERCENT) {
		/*
		 * Going through disable when we have an existing CPU percent limit
		 * set will force the ledger to refill the token bucket of the current
		 * thread. Removing any penalty applied by previous thread use.
		 */
		thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
		uth->uu_workq_flags &= ~UT_WORKQ_CPUPERCENT;
	}

	if (trp.trp_flags & TRP_CPUPERCENT) {
		/*
		 * NOTE(review): trp_refillms is scaled by NSEC_PER_SEC here even
		 * though its "ms" suffix suggests milliseconds (NSEC_PER_MSEC) —
		 * confirm the interval units thread_set_cpulimit expects.
		 */
		thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent,
		    (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
		uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT;
	}
}
666*fdd8201dSApple OSS Distributions 
/*
 * Called with the workq lock held.
 *
 * Re-derives and applies the kernel scheduling state of `uth` (QoS,
 * priority, policy, and — when CONFIG_PREADOPT_TG — the preadoption
 * thread group) for the thread request `req`. A NULL `req` resets the
 * thread to the cleanup QoS. When `unpark` is set, the park data that
 * will be reported to userspace is refreshed as well.
 */
static void
workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req, bool unpark)
{
	thread_t th = get_machthread(uth);
	thread_qos_t qos = req ? req->tr_qos : WORKQ_THREAD_QOS_CLEANUP;
	workq_threadreq_param_t trp = { };
	int priority = 31;
	int policy = POLICY_TIMESHARE;

	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	uth->uu_workq_pri = WORKQ_POLICY_INIT(qos);
	uth->uu_workq_flags &= ~UT_WORKQ_OUTSIDE_QOS;

	if (unpark) {
		uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
		// qos sent out to userspace (may differ from uu_workq_pri on param threads)
		uth->uu_save.uus_workq_park_data.qos = qos;
	}

	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;
		assert(trp.trp_value == 0); // manager qos and thread policy don't mix

		/* a raw sched pri on the manager bypasses QoS entirely */
		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
			thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri,
			    POLICY_TIMESHARE);
			return;
		}

		qos = _pthread_priority_thread_qos(mgr_pri);
	} else {
		/* explicit workloop params override the request's QoS */
		if (trp.trp_flags & TRP_PRIORITY) {
			qos = THREAD_QOS_UNSPECIFIED;
			priority = trp.trp_pri;
			uth->uu_workq_flags |= UT_WORKQ_OUTSIDE_QOS;
		}

		if (trp.trp_flags & TRP_POLICY) {
			policy = trp.trp_pol;
		}
	}

#if CONFIG_PREADOPT_TG
	if (req && (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP)) {
		/*
		 * We cannot safely read and borrow the reference from the kqwl since it
		 * can disappear from under us at any time due to the max-ing logic in
		 * kqueue_set_preadopted_thread_group.
		 *
		 * As such, we do the following dance:
		 *
		 * 1) cmpxchng and steal the kqwl's preadopt thread group and leave
		 * behind with (NULL + QoS). At this point, we have the reference
		 * to the thread group from the kqwl.
		 * 2) Have the thread set the preadoption thread group on itself.
		 * 3) cmpxchng from (NULL + QoS) which we set earlier in (1), back to
		 * thread_group + QoS. ie we try to give the reference back to the kqwl.
		 * If we fail, that's because a higher QoS thread group was set on the
		 * kqwl in kqueue_set_preadopted_thread_group in which case, we need to
		 * go back to (1).
		 */

		_Atomic(struct thread_group *) * tg_loc = kqr_preadopt_thread_group_addr(req);

		thread_group_qos_t old_tg, new_tg;
		int ret = 0;
again:
		/* step (1): steal the preadopt TG, leaving only the QoS bits */
		ret = os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
			if (!KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
			        os_atomic_rmw_loop_give_up(break);
			}

			/*
			 * Leave the QoS behind - kqueue_set_preadopted_thread_group will
			 * only modify it if there is a higher QoS thread group to attach
			 */
			new_tg = (thread_group_qos_t) ((uintptr_t) old_tg & KQWL_PREADOPT_TG_QOS_MASK);
		});

		if (ret) {
			/*
			 * We successfully took the ref from the kqwl so set it on the
			 * thread now (step (2))
			 */
			thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));

			thread_group_qos_t thread_group_to_expect = new_tg;
			thread_group_qos_t thread_group_to_set = old_tg;

			/* step (3): try to hand the reference back to the kqwl */
			os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
				if (old_tg != thread_group_to_expect) {
				        /*
				         * There was an intervening write to the kqwl_preadopt_tg,
				         * and it has a higher QoS than what we are working with
				         * here. Abandon our current adopted thread group and redo
				         * the full dance
				         */
				        thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(thread_group_to_set));
				        os_atomic_rmw_loop_give_up(goto again);
				}

				new_tg = thread_group_to_set;
			});
		} else {
			/* Nothing valid on the kqwl, just clear what's on the thread */
			thread_set_preadopt_thread_group(th, NULL);
		}
	} else {
		/* Not even a kqwl, clear what's on the thread */
		thread_set_preadopt_thread_group(th, NULL);
	}
#endif
	thread_set_workq_pri(th, qos, priority, policy);
}
787*fdd8201dSApple OSS Distributions 
/*
 * Called by kevent with the NOTE_WL_THREAD_REQUEST knote lock held,
 * every time a servicer is being told about a new max QoS.
 *
 * Updates the current thread's qos_max and rebalances its accounting
 * bucket under the workq lock. Fast-exits without taking the lock when
 * the max QoS is unchanged.
 */
void
workq_thread_set_max_qos(struct proc *p, workq_threadreq_t kqr)
{
	struct uu_workq_policy old_pri, new_pri;
	struct uthread *uth = current_uthread();
	struct workqueue *wq = proc_get_wqptr_fast(p);
	thread_qos_t qos = kqr->tr_kq_qos_index;

	if (uth->uu_workq_pri.qos_max == qos) {
		return;
	}

	workq_lock_spin(wq);
	old_pri = new_pri = uth->uu_workq_pri;
	new_pri.qos_max = qos;
	workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	workq_unlock(wq);
}
810*fdd8201dSApple OSS Distributions 
811*fdd8201dSApple OSS Distributions #pragma mark idle threads accounting and handling
812*fdd8201dSApple OSS Distributions 
/*
 * Returns the longest-idle thread (tail of wq_thidlelist) that still
 * owns a stack, or NULL. If the tail thread has no stack we fall back
 * to its predecessor, which is asserted to have one.
 * NOTE(review): this assumes stackless threads cluster at the list
 * tail — confirm against the idle-list enqueue path.
 */
static inline struct uthread *
workq_oldest_killable_idle_thread(struct workqueue *wq)
{
	struct uthread *uth = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);

	if (uth && !uth->uu_save.uus_workq_park_data.has_stack) {
		uth = TAILQ_PREV(uth, workq_uthread_head, uu_workq_entry);
		if (uth) {
			assert(uth->uu_save.uus_workq_park_data.has_stack);
		}
	}
	return uth;
}
826*fdd8201dSApple OSS Distributions 
827*fdd8201dSApple OSS Distributions static inline uint64_t
workq_kill_delay_for_idle_thread(struct workqueue * wq)828*fdd8201dSApple OSS Distributions workq_kill_delay_for_idle_thread(struct workqueue *wq)
829*fdd8201dSApple OSS Distributions {
830*fdd8201dSApple OSS Distributions 	uint64_t delay = wq_reduce_pool_window.abstime;
831*fdd8201dSApple OSS Distributions 	uint16_t idle = wq->wq_thidlecount;
832*fdd8201dSApple OSS Distributions 
833*fdd8201dSApple OSS Distributions 	/*
834*fdd8201dSApple OSS Distributions 	 * If we have less than wq_death_max_load threads, have a 5s timer.
835*fdd8201dSApple OSS Distributions 	 *
836*fdd8201dSApple OSS Distributions 	 * For the next wq_max_constrained_threads ones, decay linearly from
837*fdd8201dSApple OSS Distributions 	 * from 5s to 50ms.
838*fdd8201dSApple OSS Distributions 	 */
839*fdd8201dSApple OSS Distributions 	if (idle <= wq_death_max_load) {
840*fdd8201dSApple OSS Distributions 		return delay;
841*fdd8201dSApple OSS Distributions 	}
842*fdd8201dSApple OSS Distributions 
843*fdd8201dSApple OSS Distributions 	if (wq_max_constrained_threads > idle - wq_death_max_load) {
844*fdd8201dSApple OSS Distributions 		delay *= (wq_max_constrained_threads - (idle - wq_death_max_load));
845*fdd8201dSApple OSS Distributions 	}
846*fdd8201dSApple OSS Distributions 	return delay / wq_max_constrained_threads;
847*fdd8201dSApple OSS Distributions }
848*fdd8201dSApple OSS Distributions 
849*fdd8201dSApple OSS Distributions static inline bool
workq_should_kill_idle_thread(struct workqueue * wq,struct uthread * uth,uint64_t now)850*fdd8201dSApple OSS Distributions workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth,
851*fdd8201dSApple OSS Distributions     uint64_t now)
852*fdd8201dSApple OSS Distributions {
853*fdd8201dSApple OSS Distributions 	uint64_t delay = workq_kill_delay_for_idle_thread(wq);
854*fdd8201dSApple OSS Distributions 	return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay;
855*fdd8201dSApple OSS Distributions }
856*fdd8201dSApple OSS Distributions 
/*
 * Arms the delayed "death call" (workq_kill_old_threads_call) to run at
 * `deadline`. No-op when the workqueue is exiting or a call is already
 * pending.
 */
static void
workq_death_call_schedule(struct workqueue *wq, uint64_t deadline)
{
	uint32_t wq_flags = os_atomic_load(&wq->wq_flags, relaxed);

	if (wq_flags & (WQ_EXITING | WQ_DEATH_CALL_SCHEDULED)) {
		return;
	}
	/*
	 * NOTE(review): the load-test-then-or is not atomic as a whole;
	 * presumably callers serialize via the workq lock — confirm.
	 */
	os_atomic_or(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);

	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_NONE, wq, 1, 0, 0);

	/*
	 * <rdar://problem/13139182> Due to how long term timers work, the leeway
	 * can't be too short, so use 500ms which is long enough that we will not
	 * wake up the CPU for killing threads, but short enough that it doesn't
	 * fall into long-term timer list shenanigans.
	 */
	thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline,
	    wq_reduce_pool_window.abstime / 10,
	    THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
}
879*fdd8201dSApple OSS Distributions 
/*
 * `decrement` is set to the number of threads that are no longer dying:
 * - because they have been resuscitated just in time (workq_pop_idle_thread)
 * - or have been killed (workq_thread_terminate).
 *
 * If no thread is left dying, picks the oldest killable idle thread and
 * either marks it dying (when its idle time exceeded the kill delay) or
 * re-arms the death call for when it will.
 */
static void
workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement)
{
	struct uthread *uth;

	assert(wq->wq_thdying_count >= decrement);
	if ((wq->wq_thdying_count -= decrement) > 0) {
		/* a thread is still on its way out; wait for it to finish */
		return;
	}

	if (wq->wq_thidlecount <= 1) {
		/* never cull the last idle thread */
		return;
	}

	if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) {
		return;
	}

	uint64_t now = mach_absolute_time();
	uint64_t delay = workq_kill_delay_for_idle_thread(wq);

	if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) {
		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, wq->wq_thidlecount, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		/* wake the thread so it can park itself for good */
		if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
			workq_thread_wakeup(uth);
		}
		return;
	}

	/* not old enough yet: come back when it will be */
	workq_death_call_schedule(wq,
	    uth->uu_save.uus_workq_park_data.idle_stamp + delay);
}
920*fdd8201dSApple OSS Distributions 
/*
 * Final teardown of a workq thread: removes `uth` from the run list,
 * rebalances the death policy if the thread was dying, redrives thread
 * creation when we just dropped back under wq_max_threads, and releases
 * the thread reference.
 */
void
workq_thread_terminate(struct proc *p, struct uthread *uth)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);
	TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
	if (uth->uu_workq_flags & UT_WORKQ_DYING) {
		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END,
		    wq, wq->wq_thidlecount, 0, 0);
		/* this dying thread is done: see if another should follow */
		workq_death_policy_evaluate(wq, 1);
	}
	if (wq->wq_nthreads-- == wq_max_threads) {
		/*
		 * We got under the thread limit again, which may have prevented
		 * thread creation from happening, redrive if there are pending requests
		 */
		if (wq->wq_reqcount) {
			workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
		}
	}
	workq_unlock(wq);

	thread_deallocate(get_machthread(uth));
}
946*fdd8201dSApple OSS Distributions 
947*fdd8201dSApple OSS Distributions static void
workq_kill_old_threads_call(void * param0,void * param1 __unused)948*fdd8201dSApple OSS Distributions workq_kill_old_threads_call(void *param0, void *param1 __unused)
949*fdd8201dSApple OSS Distributions {
950*fdd8201dSApple OSS Distributions 	struct workqueue *wq = param0;
951*fdd8201dSApple OSS Distributions 
952*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
953*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_START, wq, 0, 0, 0);
954*fdd8201dSApple OSS Distributions 	os_atomic_andnot(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
955*fdd8201dSApple OSS Distributions 	workq_death_policy_evaluate(wq, 0);
956*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_END, wq, 0, 0, 0);
957*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
958*fdd8201dSApple OSS Distributions }
959*fdd8201dSApple OSS Distributions 
960*fdd8201dSApple OSS Distributions static struct uthread *
workq_pop_idle_thread(struct workqueue * wq,uint16_t uu_flags,bool * needs_wakeup)961*fdd8201dSApple OSS Distributions workq_pop_idle_thread(struct workqueue *wq, uint16_t uu_flags,
962*fdd8201dSApple OSS Distributions     bool *needs_wakeup)
963*fdd8201dSApple OSS Distributions {
964*fdd8201dSApple OSS Distributions 	struct uthread *uth;
965*fdd8201dSApple OSS Distributions 
966*fdd8201dSApple OSS Distributions 	if ((uth = TAILQ_FIRST(&wq->wq_thidlelist))) {
967*fdd8201dSApple OSS Distributions 		TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
968*fdd8201dSApple OSS Distributions 	} else {
969*fdd8201dSApple OSS Distributions 		uth = TAILQ_FIRST(&wq->wq_thnewlist);
970*fdd8201dSApple OSS Distributions 		TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
971*fdd8201dSApple OSS Distributions 	}
972*fdd8201dSApple OSS Distributions 	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);
973*fdd8201dSApple OSS Distributions 
974*fdd8201dSApple OSS Distributions 	assert((uth->uu_workq_flags & UT_WORKQ_RUNNING) == 0);
975*fdd8201dSApple OSS Distributions 	uth->uu_workq_flags |= UT_WORKQ_RUNNING | uu_flags;
976*fdd8201dSApple OSS Distributions 
977*fdd8201dSApple OSS Distributions 	/* A thread is never woken up as part of the cooperative pool */
978*fdd8201dSApple OSS Distributions 	assert((uu_flags & UT_WORKQ_COOPERATIVE) == 0);
979*fdd8201dSApple OSS Distributions 
980*fdd8201dSApple OSS Distributions 	if ((uu_flags & UT_WORKQ_OVERCOMMIT) == 0) {
981*fdd8201dSApple OSS Distributions 		wq->wq_constrained_threads_scheduled++;
982*fdd8201dSApple OSS Distributions 	}
983*fdd8201dSApple OSS Distributions 	wq->wq_threads_scheduled++;
984*fdd8201dSApple OSS Distributions 	wq->wq_thidlecount--;
985*fdd8201dSApple OSS Distributions 
986*fdd8201dSApple OSS Distributions 	if (__improbable(uth->uu_workq_flags & UT_WORKQ_DYING)) {
987*fdd8201dSApple OSS Distributions 		uth->uu_workq_flags ^= UT_WORKQ_DYING;
988*fdd8201dSApple OSS Distributions 		workq_death_policy_evaluate(wq, 1);
989*fdd8201dSApple OSS Distributions 		*needs_wakeup = false;
990*fdd8201dSApple OSS Distributions 	} else if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
991*fdd8201dSApple OSS Distributions 		*needs_wakeup = false;
992*fdd8201dSApple OSS Distributions 	} else {
993*fdd8201dSApple OSS Distributions 		*needs_wakeup = true;
994*fdd8201dSApple OSS Distributions 	}
995*fdd8201dSApple OSS Distributions 	return uth;
996*fdd8201dSApple OSS Distributions }
997*fdd8201dSApple OSS Distributions 
998*fdd8201dSApple OSS Distributions /*
999*fdd8201dSApple OSS Distributions  * Called by thread_create_workq_waiting() during thread initialization, before
1000*fdd8201dSApple OSS Distributions  * assert_wait, before the thread has been started.
1001*fdd8201dSApple OSS Distributions  */
1002*fdd8201dSApple OSS Distributions event_t
workq_thread_init_and_wq_lock(task_t task,thread_t th)1003*fdd8201dSApple OSS Distributions workq_thread_init_and_wq_lock(task_t task, thread_t th)
1004*fdd8201dSApple OSS Distributions {
1005*fdd8201dSApple OSS Distributions 	struct uthread *uth = get_bsdthread_info(th);
1006*fdd8201dSApple OSS Distributions 
1007*fdd8201dSApple OSS Distributions 	uth->uu_workq_flags = UT_WORKQ_NEW;
1008*fdd8201dSApple OSS Distributions 	uth->uu_workq_pri = WORKQ_POLICY_INIT(THREAD_QOS_LEGACY);
1009*fdd8201dSApple OSS Distributions 	uth->uu_workq_thport = MACH_PORT_NULL;
1010*fdd8201dSApple OSS Distributions 	uth->uu_workq_stackaddr = 0;
1011*fdd8201dSApple OSS Distributions 	uth->uu_workq_pthread_kill_allowed = 0;
1012*fdd8201dSApple OSS Distributions 
1013*fdd8201dSApple OSS Distributions 	thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
1014*fdd8201dSApple OSS Distributions 	thread_reset_workq_qos(th, THREAD_QOS_LEGACY);
1015*fdd8201dSApple OSS Distributions 
1016*fdd8201dSApple OSS Distributions 	workq_lock_spin(proc_get_wqptr_fast(get_bsdtask_info(task)));
1017*fdd8201dSApple OSS Distributions 	return workq_parked_wait_event(uth);
1018*fdd8201dSApple OSS Distributions }
1019*fdd8201dSApple OSS Distributions 
1020*fdd8201dSApple OSS Distributions /**
1021*fdd8201dSApple OSS Distributions  * Try to add a new workqueue thread.
1022*fdd8201dSApple OSS Distributions  *
1023*fdd8201dSApple OSS Distributions  * - called with workq lock held
1024*fdd8201dSApple OSS Distributions  * - dropped and retaken around thread creation
1025*fdd8201dSApple OSS Distributions  * - return with workq lock held
1026*fdd8201dSApple OSS Distributions  */
1027*fdd8201dSApple OSS Distributions static bool
workq_add_new_idle_thread(proc_t p,struct workqueue * wq)1028*fdd8201dSApple OSS Distributions workq_add_new_idle_thread(proc_t p, struct workqueue *wq)
1029*fdd8201dSApple OSS Distributions {
1030*fdd8201dSApple OSS Distributions 	mach_vm_offset_t th_stackaddr;
1031*fdd8201dSApple OSS Distributions 	kern_return_t kret;
1032*fdd8201dSApple OSS Distributions 	thread_t th;
1033*fdd8201dSApple OSS Distributions 
1034*fdd8201dSApple OSS Distributions 	wq->wq_nthreads++;
1035*fdd8201dSApple OSS Distributions 
1036*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
1037*fdd8201dSApple OSS Distributions 
1038*fdd8201dSApple OSS Distributions 	vm_map_t vmap = get_task_map(p->task);
1039*fdd8201dSApple OSS Distributions 
1040*fdd8201dSApple OSS Distributions 	kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr);
1041*fdd8201dSApple OSS Distributions 	if (kret != KERN_SUCCESS) {
1042*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
1043*fdd8201dSApple OSS Distributions 		    kret, 1, 0);
1044*fdd8201dSApple OSS Distributions 		goto out;
1045*fdd8201dSApple OSS Distributions 	}
1046*fdd8201dSApple OSS Distributions 
1047*fdd8201dSApple OSS Distributions 	kret = thread_create_workq_waiting(p->task, workq_unpark_continue, &th);
1048*fdd8201dSApple OSS Distributions 	if (kret != KERN_SUCCESS) {
1049*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
1050*fdd8201dSApple OSS Distributions 		    kret, 0, 0);
1051*fdd8201dSApple OSS Distributions 		pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr);
1052*fdd8201dSApple OSS Distributions 		goto out;
1053*fdd8201dSApple OSS Distributions 	}
1054*fdd8201dSApple OSS Distributions 
1055*fdd8201dSApple OSS Distributions 	// thread_create_workq_waiting() will return with the wq lock held
1056*fdd8201dSApple OSS Distributions 	// on success, because it calls workq_thread_init_and_wq_lock() above
1057*fdd8201dSApple OSS Distributions 
1058*fdd8201dSApple OSS Distributions 	struct uthread *uth = get_bsdthread_info(th);
1059*fdd8201dSApple OSS Distributions 
1060*fdd8201dSApple OSS Distributions 	wq->wq_creations++;
1061*fdd8201dSApple OSS Distributions 	wq->wq_thidlecount++;
1062*fdd8201dSApple OSS Distributions 	uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;
1063*fdd8201dSApple OSS Distributions 	TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
1064*fdd8201dSApple OSS Distributions 
1065*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0);
1066*fdd8201dSApple OSS Distributions 	return true;
1067*fdd8201dSApple OSS Distributions 
1068*fdd8201dSApple OSS Distributions out:
1069*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
1070*fdd8201dSApple OSS Distributions 	/*
1071*fdd8201dSApple OSS Distributions 	 * Do not redrive here if we went under wq_max_threads again,
1072*fdd8201dSApple OSS Distributions 	 * it is the responsibility of the callers of this function
1073*fdd8201dSApple OSS Distributions 	 * to do so when it fails.
1074*fdd8201dSApple OSS Distributions 	 */
1075*fdd8201dSApple OSS Distributions 	wq->wq_nthreads--;
1076*fdd8201dSApple OSS Distributions 	return false;
1077*fdd8201dSApple OSS Distributions }
1078*fdd8201dSApple OSS Distributions 
1079*fdd8201dSApple OSS Distributions static inline bool
workq_thread_is_overcommit(struct uthread * uth)1080*fdd8201dSApple OSS Distributions workq_thread_is_overcommit(struct uthread *uth)
1081*fdd8201dSApple OSS Distributions {
1082*fdd8201dSApple OSS Distributions 	return (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0;
1083*fdd8201dSApple OSS Distributions }
1084*fdd8201dSApple OSS Distributions 
1085*fdd8201dSApple OSS Distributions static inline bool
workq_thread_is_nonovercommit(struct uthread * uth)1086*fdd8201dSApple OSS Distributions workq_thread_is_nonovercommit(struct uthread *uth)
1087*fdd8201dSApple OSS Distributions {
1088*fdd8201dSApple OSS Distributions 	return (uth->uu_workq_flags & (UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE)) == 0;
1089*fdd8201dSApple OSS Distributions }
1090*fdd8201dSApple OSS Distributions 
1091*fdd8201dSApple OSS Distributions static inline bool
workq_thread_is_cooperative(struct uthread * uth)1092*fdd8201dSApple OSS Distributions workq_thread_is_cooperative(struct uthread *uth)
1093*fdd8201dSApple OSS Distributions {
1094*fdd8201dSApple OSS Distributions 	return (uth->uu_workq_flags & UT_WORKQ_COOPERATIVE) != 0;
1095*fdd8201dSApple OSS Distributions }
1096*fdd8201dSApple OSS Distributions 
1097*fdd8201dSApple OSS Distributions static inline void
workq_thread_set_type(struct uthread * uth,uint16_t flags)1098*fdd8201dSApple OSS Distributions workq_thread_set_type(struct uthread *uth, uint16_t flags)
1099*fdd8201dSApple OSS Distributions {
1100*fdd8201dSApple OSS Distributions 	uth->uu_workq_flags &= ~(UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
1101*fdd8201dSApple OSS Distributions 	uth->uu_workq_flags |= flags;
1102*fdd8201dSApple OSS Distributions }
1103*fdd8201dSApple OSS Distributions 
1104*fdd8201dSApple OSS Distributions 
1105*fdd8201dSApple OSS Distributions #define WORKQ_UNPARK_FOR_DEATH_WAS_IDLE 0x1
1106*fdd8201dSApple OSS Distributions 
1107*fdd8201dSApple OSS Distributions __attribute__((noreturn, noinline))
1108*fdd8201dSApple OSS Distributions static void
workq_unpark_for_death_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t death_flags,uint32_t setup_flags)1109*fdd8201dSApple OSS Distributions workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq,
1110*fdd8201dSApple OSS Distributions     struct uthread *uth, uint32_t death_flags, uint32_t setup_flags)
1111*fdd8201dSApple OSS Distributions {
1112*fdd8201dSApple OSS Distributions 	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
1113*fdd8201dSApple OSS Distributions 	bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW;
1114*fdd8201dSApple OSS Distributions 
1115*fdd8201dSApple OSS Distributions 	if (qos > WORKQ_THREAD_QOS_CLEANUP) {
1116*fdd8201dSApple OSS Distributions 		workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
1117*fdd8201dSApple OSS Distributions 		qos = WORKQ_THREAD_QOS_CLEANUP;
1118*fdd8201dSApple OSS Distributions 	}
1119*fdd8201dSApple OSS Distributions 
1120*fdd8201dSApple OSS Distributions 	workq_thread_reset_cpupercent(NULL, uth);
1121*fdd8201dSApple OSS Distributions 
1122*fdd8201dSApple OSS Distributions 	if (death_flags & WORKQ_UNPARK_FOR_DEATH_WAS_IDLE) {
1123*fdd8201dSApple OSS Distributions 		wq->wq_thidlecount--;
1124*fdd8201dSApple OSS Distributions 		if (first_use) {
1125*fdd8201dSApple OSS Distributions 			TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
1126*fdd8201dSApple OSS Distributions 		} else {
1127*fdd8201dSApple OSS Distributions 			TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
1128*fdd8201dSApple OSS Distributions 		}
1129*fdd8201dSApple OSS Distributions 	}
1130*fdd8201dSApple OSS Distributions 	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);
1131*fdd8201dSApple OSS Distributions 
1132*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
1133*fdd8201dSApple OSS Distributions 
1134*fdd8201dSApple OSS Distributions 	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
1135*fdd8201dSApple OSS Distributions 		__assert_only kern_return_t kr;
1136*fdd8201dSApple OSS Distributions 		kr = thread_set_voucher_name(MACH_PORT_NULL);
1137*fdd8201dSApple OSS Distributions 		assert(kr == KERN_SUCCESS);
1138*fdd8201dSApple OSS Distributions 	}
1139*fdd8201dSApple OSS Distributions 
1140*fdd8201dSApple OSS Distributions 	uint32_t flags = WQ_FLAG_THREAD_NEWSPI | qos | WQ_FLAG_THREAD_PRIO_QOS;
1141*fdd8201dSApple OSS Distributions 	thread_t th = get_machthread(uth);
1142*fdd8201dSApple OSS Distributions 	vm_map_t vmap = get_task_map(p->task);
1143*fdd8201dSApple OSS Distributions 
1144*fdd8201dSApple OSS Distributions 	if (!first_use) {
1145*fdd8201dSApple OSS Distributions 		flags |= WQ_FLAG_THREAD_REUSE;
1146*fdd8201dSApple OSS Distributions 	}
1147*fdd8201dSApple OSS Distributions 
1148*fdd8201dSApple OSS Distributions 	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
1149*fdd8201dSApple OSS Distributions 	    uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, flags);
1150*fdd8201dSApple OSS Distributions 	__builtin_unreachable();
1151*fdd8201dSApple OSS Distributions }
1152*fdd8201dSApple OSS Distributions 
1153*fdd8201dSApple OSS Distributions bool
workq_is_current_thread_updating_turnstile(struct workqueue * wq)1154*fdd8201dSApple OSS Distributions workq_is_current_thread_updating_turnstile(struct workqueue *wq)
1155*fdd8201dSApple OSS Distributions {
1156*fdd8201dSApple OSS Distributions 	return wq->wq_turnstile_updater == current_thread();
1157*fdd8201dSApple OSS Distributions }
1158*fdd8201dSApple OSS Distributions 
1159*fdd8201dSApple OSS Distributions __attribute__((always_inline))
1160*fdd8201dSApple OSS Distributions static inline void
1161*fdd8201dSApple OSS Distributions workq_perform_turnstile_operation_locked(struct workqueue *wq,
1162*fdd8201dSApple OSS Distributions     void (^operation)(void))
1163*fdd8201dSApple OSS Distributions {
1164*fdd8201dSApple OSS Distributions 	workq_lock_held(wq);
1165*fdd8201dSApple OSS Distributions 	wq->wq_turnstile_updater = current_thread();
1166*fdd8201dSApple OSS Distributions 	operation();
1167*fdd8201dSApple OSS Distributions 	wq->wq_turnstile_updater = THREAD_NULL;
1168*fdd8201dSApple OSS Distributions }
1169*fdd8201dSApple OSS Distributions 
1170*fdd8201dSApple OSS Distributions static void
workq_turnstile_update_inheritor(struct workqueue * wq,turnstile_inheritor_t inheritor,turnstile_update_flags_t flags)1171*fdd8201dSApple OSS Distributions workq_turnstile_update_inheritor(struct workqueue *wq,
1172*fdd8201dSApple OSS Distributions     turnstile_inheritor_t inheritor,
1173*fdd8201dSApple OSS Distributions     turnstile_update_flags_t flags)
1174*fdd8201dSApple OSS Distributions {
1175*fdd8201dSApple OSS Distributions 	if (wq->wq_inheritor == inheritor) {
1176*fdd8201dSApple OSS Distributions 		return;
1177*fdd8201dSApple OSS Distributions 	}
1178*fdd8201dSApple OSS Distributions 	wq->wq_inheritor = inheritor;
1179*fdd8201dSApple OSS Distributions 	workq_perform_turnstile_operation_locked(wq, ^{
1180*fdd8201dSApple OSS Distributions 		turnstile_update_inheritor(wq->wq_turnstile, inheritor,
1181*fdd8201dSApple OSS Distributions 		flags | TURNSTILE_IMMEDIATE_UPDATE);
1182*fdd8201dSApple OSS Distributions 		turnstile_update_inheritor_complete(wq->wq_turnstile,
1183*fdd8201dSApple OSS Distributions 		TURNSTILE_INTERLOCK_HELD);
1184*fdd8201dSApple OSS Distributions 	});
1185*fdd8201dSApple OSS Distributions }
1186*fdd8201dSApple OSS Distributions 
1187*fdd8201dSApple OSS Distributions static void
workq_push_idle_thread(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)1188*fdd8201dSApple OSS Distributions workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth,
1189*fdd8201dSApple OSS Distributions     uint32_t setup_flags)
1190*fdd8201dSApple OSS Distributions {
1191*fdd8201dSApple OSS Distributions 	uint64_t now = mach_absolute_time();
1192*fdd8201dSApple OSS Distributions 	bool is_creator = (uth == wq->wq_creator);
1193*fdd8201dSApple OSS Distributions 
1194*fdd8201dSApple OSS Distributions 	if (workq_thread_is_cooperative(uth)) {
1195*fdd8201dSApple OSS Distributions 		assert(!is_creator);
1196*fdd8201dSApple OSS Distributions 
1197*fdd8201dSApple OSS Distributions 		thread_qos_t thread_qos = uth->uu_workq_pri.qos_bucket;
1198*fdd8201dSApple OSS Distributions 		_wq_cooperative_queue_scheduled_count_dec(wq, thread_qos);
1199*fdd8201dSApple OSS Distributions 
1200*fdd8201dSApple OSS Distributions 		/* Before we get here, we always go through
1201*fdd8201dSApple OSS Distributions 		 * workq_select_threadreq_or_park_and_unlock. If we got here, it means
1202*fdd8201dSApple OSS Distributions 		 * that we went through the logic in workq_threadreq_select which
1203*fdd8201dSApple OSS Distributions 		 * did the refresh for the next best cooperative qos while
1204*fdd8201dSApple OSS Distributions 		 * excluding the current thread - we shouldn't need to do it again.
1205*fdd8201dSApple OSS Distributions 		 */
1206*fdd8201dSApple OSS Distributions 		assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
1207*fdd8201dSApple OSS Distributions 	} else if (workq_thread_is_nonovercommit(uth)) {
1208*fdd8201dSApple OSS Distributions 		assert(!is_creator);
1209*fdd8201dSApple OSS Distributions 
1210*fdd8201dSApple OSS Distributions 		wq->wq_constrained_threads_scheduled--;
1211*fdd8201dSApple OSS Distributions 	}
1212*fdd8201dSApple OSS Distributions 
1213*fdd8201dSApple OSS Distributions 	uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
1214*fdd8201dSApple OSS Distributions 	TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
1215*fdd8201dSApple OSS Distributions 	wq->wq_threads_scheduled--;
1216*fdd8201dSApple OSS Distributions 
1217*fdd8201dSApple OSS Distributions 	if (is_creator) {
1218*fdd8201dSApple OSS Distributions 		wq->wq_creator = NULL;
1219*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
1220*fdd8201dSApple OSS Distributions 		    uth->uu_save.uus_workq_park_data.yields);
1221*fdd8201dSApple OSS Distributions 	}
1222*fdd8201dSApple OSS Distributions 
1223*fdd8201dSApple OSS Distributions 	if (wq->wq_inheritor == get_machthread(uth)) {
1224*fdd8201dSApple OSS Distributions 		assert(wq->wq_creator == NULL);
1225*fdd8201dSApple OSS Distributions 		if (wq->wq_reqcount) {
1226*fdd8201dSApple OSS Distributions 			workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
1227*fdd8201dSApple OSS Distributions 		} else {
1228*fdd8201dSApple OSS Distributions 			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
1229*fdd8201dSApple OSS Distributions 		}
1230*fdd8201dSApple OSS Distributions 	}
1231*fdd8201dSApple OSS Distributions 
1232*fdd8201dSApple OSS Distributions 	if (uth->uu_workq_flags & UT_WORKQ_NEW) {
1233*fdd8201dSApple OSS Distributions 		assert(is_creator || (_wq_flags(wq) & WQ_EXITING));
1234*fdd8201dSApple OSS Distributions 		TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
1235*fdd8201dSApple OSS Distributions 		wq->wq_thidlecount++;
1236*fdd8201dSApple OSS Distributions 		return;
1237*fdd8201dSApple OSS Distributions 	}
1238*fdd8201dSApple OSS Distributions 
1239*fdd8201dSApple OSS Distributions 	if (!is_creator) {
1240*fdd8201dSApple OSS Distributions 		_wq_thactive_dec(wq, uth->uu_workq_pri.qos_bucket);
1241*fdd8201dSApple OSS Distributions 		wq->wq_thscheduled_count[_wq_bucket(uth->uu_workq_pri.qos_bucket)]--;
1242*fdd8201dSApple OSS Distributions 		uth->uu_workq_flags |= UT_WORKQ_IDLE_CLEANUP;
1243*fdd8201dSApple OSS Distributions 	}
1244*fdd8201dSApple OSS Distributions 
1245*fdd8201dSApple OSS Distributions 	uth->uu_save.uus_workq_park_data.idle_stamp = now;
1246*fdd8201dSApple OSS Distributions 
1247*fdd8201dSApple OSS Distributions 	struct uthread *oldest = workq_oldest_killable_idle_thread(wq);
1248*fdd8201dSApple OSS Distributions 	uint16_t cur_idle = wq->wq_thidlecount;
1249*fdd8201dSApple OSS Distributions 
1250*fdd8201dSApple OSS Distributions 	if (cur_idle >= wq_max_constrained_threads ||
1251*fdd8201dSApple OSS Distributions 	    (wq->wq_thdying_count == 0 && oldest &&
1252*fdd8201dSApple OSS Distributions 	    workq_should_kill_idle_thread(wq, oldest, now))) {
1253*fdd8201dSApple OSS Distributions 		/*
1254*fdd8201dSApple OSS Distributions 		 * Immediately kill threads if we have too may of them.
1255*fdd8201dSApple OSS Distributions 		 *
1256*fdd8201dSApple OSS Distributions 		 * And swap "place" with the oldest one we'd have woken up.
1257*fdd8201dSApple OSS Distributions 		 * This is a relatively desperate situation where we really
1258*fdd8201dSApple OSS Distributions 		 * need to kill threads quickly and it's best to kill
1259*fdd8201dSApple OSS Distributions 		 * the one that's currently on core than context switching.
1260*fdd8201dSApple OSS Distributions 		 */
1261*fdd8201dSApple OSS Distributions 		if (oldest) {
1262*fdd8201dSApple OSS Distributions 			oldest->uu_save.uus_workq_park_data.idle_stamp = now;
1263*fdd8201dSApple OSS Distributions 			TAILQ_REMOVE(&wq->wq_thidlelist, oldest, uu_workq_entry);
1264*fdd8201dSApple OSS Distributions 			TAILQ_INSERT_HEAD(&wq->wq_thidlelist, oldest, uu_workq_entry);
1265*fdd8201dSApple OSS Distributions 		}
1266*fdd8201dSApple OSS Distributions 
1267*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
1268*fdd8201dSApple OSS Distributions 		    wq, cur_idle, 0, 0);
1269*fdd8201dSApple OSS Distributions 		wq->wq_thdying_count++;
1270*fdd8201dSApple OSS Distributions 		uth->uu_workq_flags |= UT_WORKQ_DYING;
1271*fdd8201dSApple OSS Distributions 		uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
1272*fdd8201dSApple OSS Distributions 		workq_unpark_for_death_and_unlock(p, wq, uth, 0, setup_flags);
1273*fdd8201dSApple OSS Distributions 		__builtin_unreachable();
1274*fdd8201dSApple OSS Distributions 	}
1275*fdd8201dSApple OSS Distributions 
1276*fdd8201dSApple OSS Distributions 	struct uthread *tail = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);
1277*fdd8201dSApple OSS Distributions 
1278*fdd8201dSApple OSS Distributions 	cur_idle += 1;
1279*fdd8201dSApple OSS Distributions 	wq->wq_thidlecount = cur_idle;
1280*fdd8201dSApple OSS Distributions 
1281*fdd8201dSApple OSS Distributions 	if (cur_idle >= wq_death_max_load && tail &&
1282*fdd8201dSApple OSS Distributions 	    tail->uu_save.uus_workq_park_data.has_stack) {
1283*fdd8201dSApple OSS Distributions 		uth->uu_save.uus_workq_park_data.has_stack = false;
1284*fdd8201dSApple OSS Distributions 		TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry);
1285*fdd8201dSApple OSS Distributions 	} else {
1286*fdd8201dSApple OSS Distributions 		uth->uu_save.uus_workq_park_data.has_stack = true;
1287*fdd8201dSApple OSS Distributions 		TAILQ_INSERT_HEAD(&wq->wq_thidlelist, uth, uu_workq_entry);
1288*fdd8201dSApple OSS Distributions 	}
1289*fdd8201dSApple OSS Distributions 
1290*fdd8201dSApple OSS Distributions 	if (!tail) {
1291*fdd8201dSApple OSS Distributions 		uint64_t delay = workq_kill_delay_for_idle_thread(wq);
1292*fdd8201dSApple OSS Distributions 		workq_death_call_schedule(wq, now + delay);
1293*fdd8201dSApple OSS Distributions 	}
1294*fdd8201dSApple OSS Distributions }
1295*fdd8201dSApple OSS Distributions 
1296*fdd8201dSApple OSS Distributions #pragma mark thread requests
1297*fdd8201dSApple OSS Distributions 
1298*fdd8201dSApple OSS Distributions static inline bool
workq_tr_is_overcommit(workq_tr_flags_t tr_flags)1299*fdd8201dSApple OSS Distributions workq_tr_is_overcommit(workq_tr_flags_t tr_flags)
1300*fdd8201dSApple OSS Distributions {
1301*fdd8201dSApple OSS Distributions 	return (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) != 0;
1302*fdd8201dSApple OSS Distributions }
1303*fdd8201dSApple OSS Distributions 
1304*fdd8201dSApple OSS Distributions static inline bool
workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)1305*fdd8201dSApple OSS Distributions workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)
1306*fdd8201dSApple OSS Distributions {
1307*fdd8201dSApple OSS Distributions 	return (tr_flags & (WORKQ_TR_FLAG_OVERCOMMIT | WORKQ_TR_FLAG_COOPERATIVE)) == 0;
1308*fdd8201dSApple OSS Distributions }
1309*fdd8201dSApple OSS Distributions 
1310*fdd8201dSApple OSS Distributions static inline bool
workq_tr_is_cooperative(workq_tr_flags_t tr_flags)1311*fdd8201dSApple OSS Distributions workq_tr_is_cooperative(workq_tr_flags_t tr_flags)
1312*fdd8201dSApple OSS Distributions {
1313*fdd8201dSApple OSS Distributions 	return (tr_flags & WORKQ_TR_FLAG_COOPERATIVE) != 0;
1314*fdd8201dSApple OSS Distributions }
1315*fdd8201dSApple OSS Distributions 
1316*fdd8201dSApple OSS Distributions #define workq_threadreq_is_overcommit(req) workq_tr_is_overcommit((req)->tr_flags)
1317*fdd8201dSApple OSS Distributions #define workq_threadreq_is_nonovercommit(req) workq_tr_is_nonovercommit((req)->tr_flags)
1318*fdd8201dSApple OSS Distributions #define workq_threadreq_is_cooperative(req) workq_tr_is_cooperative((req)->tr_flags)
1319*fdd8201dSApple OSS Distributions 
1320*fdd8201dSApple OSS Distributions static inline int
workq_priority_for_req(workq_threadreq_t req)1321*fdd8201dSApple OSS Distributions workq_priority_for_req(workq_threadreq_t req)
1322*fdd8201dSApple OSS Distributions {
1323*fdd8201dSApple OSS Distributions 	thread_qos_t qos = req->tr_qos;
1324*fdd8201dSApple OSS Distributions 
1325*fdd8201dSApple OSS Distributions 	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1326*fdd8201dSApple OSS Distributions 		workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
1327*fdd8201dSApple OSS Distributions 		assert(trp.trp_flags & TRP_PRIORITY);
1328*fdd8201dSApple OSS Distributions 		return trp.trp_pri;
1329*fdd8201dSApple OSS Distributions 	}
1330*fdd8201dSApple OSS Distributions 	return thread_workq_pri_for_qos(qos);
1331*fdd8201dSApple OSS Distributions }
1332*fdd8201dSApple OSS Distributions 
1333*fdd8201dSApple OSS Distributions static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue * wq,workq_threadreq_t req)1334*fdd8201dSApple OSS Distributions workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
1335*fdd8201dSApple OSS Distributions {
1336*fdd8201dSApple OSS Distributions 	assert(!workq_tr_is_cooperative(req->tr_flags));
1337*fdd8201dSApple OSS Distributions 
1338*fdd8201dSApple OSS Distributions 	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1339*fdd8201dSApple OSS Distributions 		return &wq->wq_special_queue;
1340*fdd8201dSApple OSS Distributions 	} else if (workq_tr_is_overcommit(req->tr_flags)) {
1341*fdd8201dSApple OSS Distributions 		return &wq->wq_overcommit_queue;
1342*fdd8201dSApple OSS Distributions 	} else {
1343*fdd8201dSApple OSS Distributions 		return &wq->wq_constrained_queue;
1344*fdd8201dSApple OSS Distributions 	}
1345*fdd8201dSApple OSS Distributions }
1346*fdd8201dSApple OSS Distributions 
1347*fdd8201dSApple OSS Distributions 
1348*fdd8201dSApple OSS Distributions /* Calculates the number of threads scheduled >= the input QoS */
1349*fdd8201dSApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_to_qos(struct workqueue * wq,thread_qos_t qos)1350*fdd8201dSApple OSS Distributions workq_num_cooperative_threads_scheduled_to_qos(struct workqueue *wq, thread_qos_t qos)
1351*fdd8201dSApple OSS Distributions {
1352*fdd8201dSApple OSS Distributions 	workq_lock_held(wq);
1353*fdd8201dSApple OSS Distributions 
1354*fdd8201dSApple OSS Distributions 	uint64_t num_cooperative_threads = 0;
1355*fdd8201dSApple OSS Distributions 
1356*fdd8201dSApple OSS Distributions 	for (thread_qos_t cur_qos = WORKQ_THREAD_QOS_MAX; cur_qos >= qos; cur_qos--) {
1357*fdd8201dSApple OSS Distributions 		uint8_t bucket = _wq_bucket(cur_qos);
1358*fdd8201dSApple OSS Distributions 		num_cooperative_threads += wq->wq_cooperative_queue_scheduled_count[bucket];
1359*fdd8201dSApple OSS Distributions 	}
1360*fdd8201dSApple OSS Distributions 
1361*fdd8201dSApple OSS Distributions 	return num_cooperative_threads;
1362*fdd8201dSApple OSS Distributions }
1363*fdd8201dSApple OSS Distributions 
1364*fdd8201dSApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_total(struct workqueue * wq)1365*fdd8201dSApple OSS Distributions workq_num_cooperative_threads_scheduled_total(struct workqueue *wq)
1366*fdd8201dSApple OSS Distributions {
1367*fdd8201dSApple OSS Distributions 	return workq_num_cooperative_threads_scheduled_to_qos(wq, WORKQ_THREAD_QOS_MIN);
1368*fdd8201dSApple OSS Distributions }
1369*fdd8201dSApple OSS Distributions 
1370*fdd8201dSApple OSS Distributions #if DEBUG || DEVELOPMENT
1371*fdd8201dSApple OSS Distributions static bool
workq_has_cooperative_thread_requests(struct workqueue * wq)1372*fdd8201dSApple OSS Distributions workq_has_cooperative_thread_requests(struct workqueue *wq)
1373*fdd8201dSApple OSS Distributions {
1374*fdd8201dSApple OSS Distributions 	for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
1375*fdd8201dSApple OSS Distributions 		uint8_t bucket = _wq_bucket(qos);
1376*fdd8201dSApple OSS Distributions 		if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1377*fdd8201dSApple OSS Distributions 			return true;
1378*fdd8201dSApple OSS Distributions 		}
1379*fdd8201dSApple OSS Distributions 	}
1380*fdd8201dSApple OSS Distributions 
1381*fdd8201dSApple OSS Distributions 	return false;
1382*fdd8201dSApple OSS Distributions }
1383*fdd8201dSApple OSS Distributions #endif
1384*fdd8201dSApple OSS Distributions 
1385*fdd8201dSApple OSS Distributions /*
1386*fdd8201dSApple OSS Distributions  * Determines the next QoS bucket we should service next in the cooperative
1387*fdd8201dSApple OSS Distributions  * pool. This function will always return a QoS for cooperative pool as long as
1388*fdd8201dSApple OSS Distributions  * there are requests to be serviced.
1389*fdd8201dSApple OSS Distributions  *
1390*fdd8201dSApple OSS Distributions  * Unlike the other thread pools, for the cooperative thread pool the schedule
1391*fdd8201dSApple OSS Distributions  * counts for the various buckets in the pool affect the next best request for
1392*fdd8201dSApple OSS Distributions  * it.
1393*fdd8201dSApple OSS Distributions  *
1394*fdd8201dSApple OSS Distributions  * This function is called in the following contexts:
1395*fdd8201dSApple OSS Distributions  *
1396*fdd8201dSApple OSS Distributions  * a) When determining the best thread QoS for cooperative bucket for the
1397*fdd8201dSApple OSS Distributions  * creator/thread reuse
1398*fdd8201dSApple OSS Distributions  *
1399*fdd8201dSApple OSS Distributions  * b) Once (a) has happened and thread has bound to a thread request, figuring
1400*fdd8201dSApple OSS Distributions  * out whether the next best request for this pool has changed so that creator
1401*fdd8201dSApple OSS Distributions  * can be scheduled.
1402*fdd8201dSApple OSS Distributions  *
1403*fdd8201dSApple OSS Distributions  * Returns true if the cooperative queue's best qos changed from previous
1404*fdd8201dSApple OSS Distributions  * value.
1405*fdd8201dSApple OSS Distributions  */
1406*fdd8201dSApple OSS Distributions static bool
_wq_cooperative_queue_refresh_best_req_qos(struct workqueue * wq)1407*fdd8201dSApple OSS Distributions _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq)
1408*fdd8201dSApple OSS Distributions {
1409*fdd8201dSApple OSS Distributions 	workq_lock_held(wq);
1410*fdd8201dSApple OSS Distributions 
1411*fdd8201dSApple OSS Distributions 	thread_qos_t old_best_req_qos = wq->wq_cooperative_queue_best_req_qos;
1412*fdd8201dSApple OSS Distributions 
1413*fdd8201dSApple OSS Distributions 	/* We determine the next best cooperative thread request based on the
1414*fdd8201dSApple OSS Distributions 	 * following:
1415*fdd8201dSApple OSS Distributions 	 *
1416*fdd8201dSApple OSS Distributions 	 * 1. Take the MAX of the following:
1417*fdd8201dSApple OSS Distributions 	 *		a) Highest qos with pending TRs such that number of scheduled
1418*fdd8201dSApple OSS Distributions 	 *		threads so far with >= qos is < wq_max_cooperative_threads
1419*fdd8201dSApple OSS Distributions 	 *		b) Highest qos bucket with pending TRs but no scheduled threads for that bucket
1420*fdd8201dSApple OSS Distributions 	 *
1421*fdd8201dSApple OSS Distributions 	 * 2. If the result of (1) is UN, then we pick the highest priority amongst
1422*fdd8201dSApple OSS Distributions 	 * pending thread requests in the pool.
1423*fdd8201dSApple OSS Distributions 	 *
1424*fdd8201dSApple OSS Distributions 	 */
1425*fdd8201dSApple OSS Distributions 	thread_qos_t highest_qos_with_no_scheduled = THREAD_QOS_UNSPECIFIED;
1426*fdd8201dSApple OSS Distributions 	thread_qos_t highest_qos_req_with_width = THREAD_QOS_UNSPECIFIED;
1427*fdd8201dSApple OSS Distributions 
1428*fdd8201dSApple OSS Distributions 	thread_qos_t highest_qos_req = THREAD_QOS_UNSPECIFIED;
1429*fdd8201dSApple OSS Distributions 
1430*fdd8201dSApple OSS Distributions 	int scheduled_count_till_qos = 0;
1431*fdd8201dSApple OSS Distributions 
1432*fdd8201dSApple OSS Distributions 	for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
1433*fdd8201dSApple OSS Distributions 		uint8_t bucket = _wq_bucket(qos);
1434*fdd8201dSApple OSS Distributions 		uint8_t scheduled_count_for_bucket = wq->wq_cooperative_queue_scheduled_count[bucket];
1435*fdd8201dSApple OSS Distributions 		scheduled_count_till_qos += scheduled_count_for_bucket;
1436*fdd8201dSApple OSS Distributions 
1437*fdd8201dSApple OSS Distributions 		if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1438*fdd8201dSApple OSS Distributions 			if (qos > highest_qos_req) {
1439*fdd8201dSApple OSS Distributions 				highest_qos_req = qos;
1440*fdd8201dSApple OSS Distributions 			}
1441*fdd8201dSApple OSS Distributions 			/*
1442*fdd8201dSApple OSS Distributions 			 * The pool isn't saturated for threads at and above this QoS, and
1443*fdd8201dSApple OSS Distributions 			 * this qos bucket has pending requests
1444*fdd8201dSApple OSS Distributions 			 */
1445*fdd8201dSApple OSS Distributions 			if (scheduled_count_till_qos < wq_cooperative_queue_max_size(wq)) {
1446*fdd8201dSApple OSS Distributions 				if (qos > highest_qos_req_with_width) {
1447*fdd8201dSApple OSS Distributions 					highest_qos_req_with_width = qos;
1448*fdd8201dSApple OSS Distributions 				}
1449*fdd8201dSApple OSS Distributions 			}
1450*fdd8201dSApple OSS Distributions 
1451*fdd8201dSApple OSS Distributions 			/*
1452*fdd8201dSApple OSS Distributions 			 * There are no threads scheduled for this bucket but there
1453*fdd8201dSApple OSS Distributions 			 * is work pending, give it at least 1 thread
1454*fdd8201dSApple OSS Distributions 			 */
1455*fdd8201dSApple OSS Distributions 			if (scheduled_count_for_bucket == 0) {
1456*fdd8201dSApple OSS Distributions 				if (qos > highest_qos_with_no_scheduled) {
1457*fdd8201dSApple OSS Distributions 					highest_qos_with_no_scheduled = qos;
1458*fdd8201dSApple OSS Distributions 				}
1459*fdd8201dSApple OSS Distributions 			}
1460*fdd8201dSApple OSS Distributions 		}
1461*fdd8201dSApple OSS Distributions 	}
1462*fdd8201dSApple OSS Distributions 
1463*fdd8201dSApple OSS Distributions 	wq->wq_cooperative_queue_best_req_qos = MAX(highest_qos_with_no_scheduled, highest_qos_req_with_width);
1464*fdd8201dSApple OSS Distributions 	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
1465*fdd8201dSApple OSS Distributions 		wq->wq_cooperative_queue_best_req_qos = highest_qos_req;
1466*fdd8201dSApple OSS Distributions 	}
1467*fdd8201dSApple OSS Distributions 
1468*fdd8201dSApple OSS Distributions #if DEBUG || DEVELOPMENT
1469*fdd8201dSApple OSS Distributions 	/* Assert that if we are showing up the next best req as UN, then there
1470*fdd8201dSApple OSS Distributions 	 * actually is no thread request in the cooperative pool buckets */
1471*fdd8201dSApple OSS Distributions 	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
1472*fdd8201dSApple OSS Distributions 		assert(!workq_has_cooperative_thread_requests(wq));
1473*fdd8201dSApple OSS Distributions 	}
1474*fdd8201dSApple OSS Distributions #endif
1475*fdd8201dSApple OSS Distributions 
1476*fdd8201dSApple OSS Distributions 	return old_best_req_qos != wq->wq_cooperative_queue_best_req_qos;
1477*fdd8201dSApple OSS Distributions }
1478*fdd8201dSApple OSS Distributions 
/*
 * Returns whether or not the input thread (or creator thread if uth is NULL)
 * should be allowed to work as part of the cooperative pool for the <input qos>
 * bucket.
 *
 * This function is called in a bunch of places:
 *		a) Quantum expires for a thread and it is part of the cooperative pool
 *		b) When trying to pick a thread request for the creator thread to
 *		represent.
 *		c) When a thread is trying to pick a thread request to actually bind to
 *		and service.
 *
 * Called with workq lock held.
 */

/* Admission-decision reason codes emitted with TRACE_wq_cooperative_admission */
#define WQ_COOPERATIVE_POOL_UNSATURATED 1
#define WQ_COOPERATIVE_BUCKET_UNSERVICED 2
#define WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS 3

static bool
workq_cooperative_allowance(struct workqueue *wq, thread_qos_t qos, struct uthread *uth,
    bool may_start_timer)
{
	workq_lock_held(wq);

	bool exclude_thread_as_scheduled = false;
	bool passed_admissions = false;
	uint8_t bucket = _wq_bucket(qos);

	if (uth && workq_thread_is_cooperative(uth)) {
		/*
		 * The thread being evaluated is already counted in the cooperative
		 * scheduled counts. Drop it from the counts while running the
		 * admissions checks so it does not count against itself; the count
		 * is restored at "out:" below.
		 */
		exclude_thread_as_scheduled = true;
		_wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_bucket);
	}

	/*
	 * We have not saturated the pool yet, let this thread continue
	 */
	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
	if (total_cooperative_threads < wq_cooperative_queue_max_size(wq)) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_POOL_UNSATURATED);
		goto out;
	}

	/*
	 * Without this thread, nothing is servicing the bucket which has pending
	 * work
	 */
	uint64_t bucket_scheduled = wq->wq_cooperative_queue_scheduled_count[bucket];
	if (bucket_scheduled == 0 &&
	    !STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_BUCKET_UNSERVICED);
		goto out;
	}

	/*
	 * If number of threads at the QoS bucket >= input QoS exceeds the max we want
	 * for the pool, deny this thread
	 */
	uint64_t aggregate_down_to_qos = workq_num_cooperative_threads_scheduled_to_qos(wq, qos);
	passed_admissions = (aggregate_down_to_qos < wq_cooperative_queue_max_size(wq));
	WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE, aggregate_down_to_qos,
	    qos, passed_admissions, WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS);

	if (!passed_admissions && may_start_timer) {
		/*
		 * Admission denied: arm the delayed thread-creation call so the
		 * pending request is not stranded.
		 */
		workq_schedule_delayed_thread_creation(wq, 0);
	}

out:
	if (exclude_thread_as_scheduled) {
		/* Restore the scheduled count we excluded above. */
		_wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_bucket);
	}
	return passed_admissions;
}
1559*fdd8201dSApple OSS Distributions 
/*
 * Enqueues a freshly created thread request onto the appropriate structure of
 * the workqueue: the manager slot, a cooperative FIFO bucket, or one of the
 * priority queues.
 *
 * returns true if the best request for the pool changed as a result of
 * enqueuing this thread request.
 */
static bool
workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req)
{
	assert(req->tr_state == WORKQ_TR_STATE_NEW);

	req->tr_state = WORKQ_TR_STATE_QUEUED;
	wq->wq_reqcount += req->tr_count;

	if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
		/* A single manager request is held in a dedicated slot, not a queue. */
		assert(wq->wq_event_manager_threadreq == NULL);
		assert(req->tr_flags & WORKQ_TR_FLAG_KEVENT);
		assert(req->tr_count == 1);
		wq->wq_event_manager_threadreq = req;
		return true;
	}

	if (workq_threadreq_is_cooperative(req)) {
		assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
		assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);

		/* Cooperative requests go on a per-QoS-bucket FIFO tail queue. */
		struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
		STAILQ_INSERT_TAIL(bucket, req, tr_link);

		/*
		 * The cooperative pool's best request depends on scheduled counts,
		 * so recompute it rather than comparing QoS values directly.
		 */
		return _wq_cooperative_queue_refresh_best_req_qos(wq);
	}

	/* All other request types live in the pool's priority queue. */
	struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);

	priority_queue_entry_set_sched_pri(q, &req->tr_entry,
	    workq_priority_for_req(req), false);

	/*
	 * NOTE(review): priority_queue_insert's boolean presumably signals that
	 * req became the queue's new head (i.e. the pool's best request changed)
	 * — confirm against the priority_queue implementation.
	 */
	if (priority_queue_insert(q, &req->tr_entry)) {
		if (workq_threadreq_is_nonovercommit(req)) {
			_wq_thactive_refresh_best_constrained_req_qos(wq);
		}
		return true;
	}
	return false;
}
1603*fdd8201dSApple OSS Distributions 
/*
 * returns true if one of the following is true (so as to update creator if
 * needed):
 *
 * (a) the next highest request of the pool we dequeued the request from changed
 * (b) the next highest requests of the pool the current thread used to be a
 * part of, changed
 *
 * For overcommit, special and constrained pools, the next highest QoS for each
 * pool just a MAX of pending requests so tracking (a) is sufficient.
 *
 * But for cooperative thread pool, the next highest QoS for the pool depends on
 * schedule counts in the pool as well. So if the current thread used to be
 * cooperative in it's previous logical run ie (b), then that can also affect
 * cooperative pool's next best QoS requests.
 */
static bool
workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req,
    bool cooperative_sched_count_changed)
{
	wq->wq_reqcount--;

	bool next_highest_request_changed = false;

	/* Only unlink the request once its last counted use is consumed. */
	if (--req->tr_count == 0) {
		if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
			assert(wq->wq_event_manager_threadreq == req);
			assert(req->tr_count == 0);
			wq->wq_event_manager_threadreq = NULL;

			/* If a cooperative thread was the one which picked up the manager
			 * thread request, we need to reevaluate the cooperative pool
			 * anyways.
			 */
			if (cooperative_sched_count_changed) {
				_wq_cooperative_queue_refresh_best_req_qos(wq);
			}
			return true;
		}

		if (workq_threadreq_is_cooperative(req)) {
			assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
			assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
			/* Account for the fact that BG and MT are coalesced when
			 * calculating best request for cooperative pool
			 */
			assert(_wq_bucket(req->tr_qos) == _wq_bucket(wq->wq_cooperative_queue_best_req_qos));

			struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
			__assert_only workq_threadreq_t head = STAILQ_FIRST(bucket);

			/* Cooperative requests are only ever serviced from the FIFO head. */
			assert(head == req);
			STAILQ_REMOVE_HEAD(bucket, tr_link);

			/*
			 * If the request we're dequeueing is cooperative, then the sched
			 * counts definitely changed.
			 */
			assert(cooperative_sched_count_changed);
		}

		/*
		 * We want to do the cooperative pool refresh after dequeueing a
		 * cooperative thread request if any (to combine both effects into 1
		 * refresh operation)
		 */
		if (cooperative_sched_count_changed) {
			next_highest_request_changed = _wq_cooperative_queue_refresh_best_req_qos(wq);
		}

		if (!workq_threadreq_is_cooperative(req)) {
			/*
			 * All other types of requests are enqueued in priority queues
			 */

			if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
			    &req->tr_entry)) {
				/* Removing the head means the pool's best request changed. */
				next_highest_request_changed |= true;
				if (workq_threadreq_is_nonovercommit(req)) {
					_wq_thactive_refresh_best_constrained_req_qos(wq);
				}
			}
		}
	}

	return next_highest_request_changed;
}
1691*fdd8201dSApple OSS Distributions 
1692*fdd8201dSApple OSS Distributions static void
workq_threadreq_destroy(proc_t p,workq_threadreq_t req)1693*fdd8201dSApple OSS Distributions workq_threadreq_destroy(proc_t p, workq_threadreq_t req)
1694*fdd8201dSApple OSS Distributions {
1695*fdd8201dSApple OSS Distributions 	req->tr_state = WORKQ_TR_STATE_CANCELED;
1696*fdd8201dSApple OSS Distributions 	if (req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT)) {
1697*fdd8201dSApple OSS Distributions 		kqueue_threadreq_cancel(p, req);
1698*fdd8201dSApple OSS Distributions 	} else {
1699*fdd8201dSApple OSS Distributions 		zfree(workq_zone_threadreq, req);
1700*fdd8201dSApple OSS Distributions 	}
1701*fdd8201dSApple OSS Distributions }
1702*fdd8201dSApple OSS Distributions 
1703*fdd8201dSApple OSS Distributions #pragma mark workqueue thread creation thread calls
1704*fdd8201dSApple OSS Distributions 
/*
 * Atomically transitions wq_flags in preparation for entering a thread call.
 *
 * Gives up (returns false) if the workqueue is exiting, the call is already
 * scheduled/pended, or any flag in fail_mask is set. If the owning process is
 * suspended, only the "pended" flag is recorded so the call can be replayed by
 * workq_proc_resumed(); otherwise the "scheduled" flag is set.
 *
 * Returns true when the caller should actually enter the thread call now
 * (i.e. the process was not suspended at the time of the transition).
 */
static inline bool
workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend,
    uint32_t fail_mask)
{
	uint32_t old_flags, new_flags;

	os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, {
		if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) {
		        os_atomic_rmw_loop_give_up(return false);
		}
		if (__improbable(old_flags & WQ_PROC_SUSPENDED)) {
		        new_flags = old_flags | pend;
		} else {
		        new_flags = old_flags | sched;
		}
	});

	return (old_flags & WQ_PROC_SUSPENDED) == 0;
}
1724*fdd8201dSApple OSS Distributions 
#define WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART 0x1

/*
 * Arms the delayed thread-creation thread call for this workqueue.
 *
 * The delay window (wq_timer_interval) adapts: it doubles (capped at
 * wq_max_timer_interval) when the call last ran within the current window,
 * and halves (floored at wq_stalled_window) after a long quiet period.
 * Passing WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART keeps the window
 * unchanged (used when replaying a call pended during process suspension).
 *
 * Must be called with preemption disabled. Returns true if the call was
 * armed, false if workq_thread_call_prepost() declined (exiting, already
 * scheduled/pended, or an immediate call takes precedence).
 */
static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags)
{
	assert(!preemption_enabled());

	if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED,
	    WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
	    WQ_IMMEDIATE_CALL_SCHEDULED)) {
		return false;
	}

	uint64_t now = mach_absolute_time();

	if (flags & WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART) {
		/* do not change the window */
	} else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
		/* The call ran recently: back off exponentially, up to the cap. */
		wq->wq_timer_interval *= 2;
		if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
		}
	} else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
		/* It has been quiet for a while: shrink the window, down to the floor. */
		wq->wq_timer_interval /= 2;
		if (wq->wq_timer_interval < wq_stalled_window.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
		}
	}

	WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
	    _wq_flags(wq), wq->wq_timer_interval);

	thread_call_t call = wq->wq_delayed_call;
	uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED;
	uint64_t deadline = now + wq->wq_timer_interval;
	/* prepost guarantees the call is not already scheduled */
	if (thread_call_enter1_delayed(call, (void *)arg, deadline)) {
		panic("delayed_call was already enqueued");
	}
	return true;
}
1765*fdd8201dSApple OSS Distributions 
1766*fdd8201dSApple OSS Distributions static void
workq_schedule_immediate_thread_creation(struct workqueue * wq)1767*fdd8201dSApple OSS Distributions workq_schedule_immediate_thread_creation(struct workqueue *wq)
1768*fdd8201dSApple OSS Distributions {
1769*fdd8201dSApple OSS Distributions 	assert(!preemption_enabled());
1770*fdd8201dSApple OSS Distributions 
1771*fdd8201dSApple OSS Distributions 	if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED,
1772*fdd8201dSApple OSS Distributions 	    WQ_IMMEDIATE_CALL_PENDED, 0)) {
1773*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
1774*fdd8201dSApple OSS Distributions 		    _wq_flags(wq), 0);
1775*fdd8201dSApple OSS Distributions 
1776*fdd8201dSApple OSS Distributions 		uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED;
1777*fdd8201dSApple OSS Distributions 		if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) {
1778*fdd8201dSApple OSS Distributions 			panic("immediate_call was already enqueued");
1779*fdd8201dSApple OSS Distributions 		}
1780*fdd8201dSApple OSS Distributions 	}
1781*fdd8201dSApple OSS Distributions }
1782*fdd8201dSApple OSS Distributions 
1783*fdd8201dSApple OSS Distributions void
workq_proc_suspended(struct proc * p)1784*fdd8201dSApple OSS Distributions workq_proc_suspended(struct proc *p)
1785*fdd8201dSApple OSS Distributions {
1786*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
1787*fdd8201dSApple OSS Distributions 
1788*fdd8201dSApple OSS Distributions 	if (wq) {
1789*fdd8201dSApple OSS Distributions 		os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
1790*fdd8201dSApple OSS Distributions 	}
1791*fdd8201dSApple OSS Distributions }
1792*fdd8201dSApple OSS Distributions 
1793*fdd8201dSApple OSS Distributions void
workq_proc_resumed(struct proc * p)1794*fdd8201dSApple OSS Distributions workq_proc_resumed(struct proc *p)
1795*fdd8201dSApple OSS Distributions {
1796*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
1797*fdd8201dSApple OSS Distributions 	uint32_t wq_flags;
1798*fdd8201dSApple OSS Distributions 
1799*fdd8201dSApple OSS Distributions 	if (!wq) {
1800*fdd8201dSApple OSS Distributions 		return;
1801*fdd8201dSApple OSS Distributions 	}
1802*fdd8201dSApple OSS Distributions 
1803*fdd8201dSApple OSS Distributions 	wq_flags = os_atomic_andnot_orig(&wq->wq_flags, WQ_PROC_SUSPENDED |
1804*fdd8201dSApple OSS Distributions 	    WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED, relaxed);
1805*fdd8201dSApple OSS Distributions 	if ((wq_flags & WQ_EXITING) == 0) {
1806*fdd8201dSApple OSS Distributions 		disable_preemption();
1807*fdd8201dSApple OSS Distributions 		if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) {
1808*fdd8201dSApple OSS Distributions 			workq_schedule_immediate_thread_creation(wq);
1809*fdd8201dSApple OSS Distributions 		} else if (wq_flags & WQ_DELAYED_CALL_PENDED) {
1810*fdd8201dSApple OSS Distributions 			workq_schedule_delayed_thread_creation(wq,
1811*fdd8201dSApple OSS Distributions 			    WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
1812*fdd8201dSApple OSS Distributions 		}
1813*fdd8201dSApple OSS Distributions 		enable_preemption();
1814*fdd8201dSApple OSS Distributions 	}
1815*fdd8201dSApple OSS Distributions }
1816*fdd8201dSApple OSS Distributions 
1817*fdd8201dSApple OSS Distributions /**
1818*fdd8201dSApple OSS Distributions  * returns whether lastblocked_tsp is within wq_stalled_window usecs of now
1819*fdd8201dSApple OSS Distributions  */
1820*fdd8201dSApple OSS Distributions static bool
workq_thread_is_busy(uint64_t now,_Atomic uint64_t * lastblocked_tsp)1821*fdd8201dSApple OSS Distributions workq_thread_is_busy(uint64_t now, _Atomic uint64_t *lastblocked_tsp)
1822*fdd8201dSApple OSS Distributions {
1823*fdd8201dSApple OSS Distributions 	uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed);
1824*fdd8201dSApple OSS Distributions 	if (now <= lastblocked_ts) {
1825*fdd8201dSApple OSS Distributions 		/*
1826*fdd8201dSApple OSS Distributions 		 * Because the update of the timestamp when a thread blocks
1827*fdd8201dSApple OSS Distributions 		 * isn't serialized against us looking at it (i.e. we don't hold
1828*fdd8201dSApple OSS Distributions 		 * the workq lock), it's possible to have a timestamp that matches
1829*fdd8201dSApple OSS Distributions 		 * the current time or that even looks to be in the future relative
1830*fdd8201dSApple OSS Distributions 		 * to when we grabbed the current time...
1831*fdd8201dSApple OSS Distributions 		 *
1832*fdd8201dSApple OSS Distributions 		 * Just treat this as a busy thread since it must have just blocked.
1833*fdd8201dSApple OSS Distributions 		 */
1834*fdd8201dSApple OSS Distributions 		return true;
1835*fdd8201dSApple OSS Distributions 	}
1836*fdd8201dSApple OSS Distributions 	return (now - lastblocked_ts) < wq_stalled_window.abstime;
1837*fdd8201dSApple OSS Distributions }
1838*fdd8201dSApple OSS Distributions 
/*
 * Thread-call handler for both the delayed and immediate thread-creation
 * calls. `flags` identifies which call fired (WQ_DELAYED_CALL_SCHEDULED or
 * WQ_IMMEDIATE_CALL_SCHEDULED); the handler clears that flag and lets the
 * creator logic decide whether new threads are needed.
 */
static void
workq_add_new_threads_call(void *_p, void *flags)
{
	proc_t p = _p;
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t my_flag = (uint32_t)(uintptr_t)flags;

	/*
	 * workq_exit() will set the workqueue to NULL before
	 * it cancels thread calls.
	 */
	if (!wq) {
		return;
	}

	assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) ||
	    (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq),
	    wq->wq_nthreads, wq->wq_thidlecount);

	workq_lock_spin(wq);

	wq->wq_thread_call_last_run = mach_absolute_time();
	/* clear the SCHEDULED flag set by workq_thread_call_prepost() */
	os_atomic_andnot(&wq->wq_flags, my_flag, release);

	/* This can drop the workqueue lock, and take it again */
	workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);

	workq_unlock(wq);

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0,
	    wq->wq_nthreads, wq->wq_thidlecount);
}
1873*fdd8201dSApple OSS Distributions 
1874*fdd8201dSApple OSS Distributions #pragma mark thread state tracking
1875*fdd8201dSApple OSS Distributions 
1876*fdd8201dSApple OSS Distributions static void
workq_sched_callback(int type,thread_t thread)1877*fdd8201dSApple OSS Distributions workq_sched_callback(int type, thread_t thread)
1878*fdd8201dSApple OSS Distributions {
1879*fdd8201dSApple OSS Distributions 	thread_ro_t tro = get_thread_ro(thread);
1880*fdd8201dSApple OSS Distributions 	struct uthread *uth = get_bsdthread_info(thread);
1881*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(tro->tro_proc);
1882*fdd8201dSApple OSS Distributions 	thread_qos_t req_qos, qos = uth->uu_workq_pri.qos_bucket;
1883*fdd8201dSApple OSS Distributions 	wq_thactive_t old_thactive;
1884*fdd8201dSApple OSS Distributions 	bool start_timer = false;
1885*fdd8201dSApple OSS Distributions 
1886*fdd8201dSApple OSS Distributions 	if (qos == WORKQ_THREAD_QOS_MANAGER) {
1887*fdd8201dSApple OSS Distributions 		return;
1888*fdd8201dSApple OSS Distributions 	}
1889*fdd8201dSApple OSS Distributions 
1890*fdd8201dSApple OSS Distributions 	switch (type) {
1891*fdd8201dSApple OSS Distributions 	case SCHED_CALL_BLOCK:
1892*fdd8201dSApple OSS Distributions 		old_thactive = _wq_thactive_dec(wq, qos);
1893*fdd8201dSApple OSS Distributions 		req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
1894*fdd8201dSApple OSS Distributions 
1895*fdd8201dSApple OSS Distributions 		/*
1896*fdd8201dSApple OSS Distributions 		 * Remember the timestamp of the last thread that blocked in this
1897*fdd8201dSApple OSS Distributions 		 * bucket, it used used by admission checks to ignore one thread
1898*fdd8201dSApple OSS Distributions 		 * being inactive if this timestamp is recent enough.
1899*fdd8201dSApple OSS Distributions 		 *
1900*fdd8201dSApple OSS Distributions 		 * If we collide with another thread trying to update the
1901*fdd8201dSApple OSS Distributions 		 * last_blocked (really unlikely since another thread would have to
1902*fdd8201dSApple OSS Distributions 		 * get scheduled and then block after we start down this path), it's
1903*fdd8201dSApple OSS Distributions 		 * not a problem.  Either timestamp is adequate, so no need to retry
1904*fdd8201dSApple OSS Distributions 		 */
1905*fdd8201dSApple OSS Distributions 		os_atomic_store_wide(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
1906*fdd8201dSApple OSS Distributions 		    thread_last_run_time(thread), relaxed);
1907*fdd8201dSApple OSS Distributions 
1908*fdd8201dSApple OSS Distributions 		if (req_qos == THREAD_QOS_UNSPECIFIED) {
1909*fdd8201dSApple OSS Distributions 			/*
1910*fdd8201dSApple OSS Distributions 			 * No pending request at the moment we could unblock, move on.
1911*fdd8201dSApple OSS Distributions 			 */
1912*fdd8201dSApple OSS Distributions 		} else if (qos < req_qos) {
1913*fdd8201dSApple OSS Distributions 			/*
1914*fdd8201dSApple OSS Distributions 			 * The blocking thread is at a lower QoS than the highest currently
1915*fdd8201dSApple OSS Distributions 			 * pending constrained request, nothing has to be redriven
1916*fdd8201dSApple OSS Distributions 			 */
1917*fdd8201dSApple OSS Distributions 		} else {
1918*fdd8201dSApple OSS Distributions 			uint32_t max_busycount, old_req_count;
1919*fdd8201dSApple OSS Distributions 			old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
1920*fdd8201dSApple OSS Distributions 			    req_qos, NULL, &max_busycount);
1921*fdd8201dSApple OSS Distributions 			/*
1922*fdd8201dSApple OSS Distributions 			 * If it is possible that may_start_constrained_thread had refused
1923*fdd8201dSApple OSS Distributions 			 * admission due to being over the max concurrency, we may need to
1924*fdd8201dSApple OSS Distributions 			 * spin up a new thread.
1925*fdd8201dSApple OSS Distributions 			 *
1926*fdd8201dSApple OSS Distributions 			 * We take into account the maximum number of busy threads
1927*fdd8201dSApple OSS Distributions 			 * that can affect may_start_constrained_thread as looking at the
1928*fdd8201dSApple OSS Distributions 			 * actual number may_start_constrained_thread will see is racy.
1929*fdd8201dSApple OSS Distributions 			 *
1930*fdd8201dSApple OSS Distributions 			 * IOW at NCPU = 4, for IN (req_qos = 1), if the old req count is
1931*fdd8201dSApple OSS Distributions 			 * between NCPU (4) and NCPU - 2 (2) we need to redrive.
1932*fdd8201dSApple OSS Distributions 			 */
1933*fdd8201dSApple OSS Distributions 			uint32_t conc = wq_max_parallelism[_wq_bucket(qos)];
1934*fdd8201dSApple OSS Distributions 			if (old_req_count <= conc && conc <= old_req_count + max_busycount) {
1935*fdd8201dSApple OSS Distributions 				start_timer = workq_schedule_delayed_thread_creation(wq, 0);
1936*fdd8201dSApple OSS Distributions 			}
1937*fdd8201dSApple OSS Distributions 		}
1938*fdd8201dSApple OSS Distributions 		if (__improbable(kdebug_enable)) {
1939*fdd8201dSApple OSS Distributions 			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
1940*fdd8201dSApple OSS Distributions 			    old_thactive, qos, NULL, NULL);
1941*fdd8201dSApple OSS Distributions 			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
1942*fdd8201dSApple OSS Distributions 			    old - 1, qos | (req_qos << 8),
1943*fdd8201dSApple OSS Distributions 			    wq->wq_reqcount << 1 | start_timer);
1944*fdd8201dSApple OSS Distributions 		}
1945*fdd8201dSApple OSS Distributions 		break;
1946*fdd8201dSApple OSS Distributions 
1947*fdd8201dSApple OSS Distributions 	case SCHED_CALL_UNBLOCK:
1948*fdd8201dSApple OSS Distributions 		/*
1949*fdd8201dSApple OSS Distributions 		 * we cannot take the workqueue_lock here...
1950*fdd8201dSApple OSS Distributions 		 * an UNBLOCK can occur from a timer event which
1951*fdd8201dSApple OSS Distributions 		 * is run from an interrupt context... if the workqueue_lock
1952*fdd8201dSApple OSS Distributions 		 * is already held by this processor, we'll deadlock...
1953*fdd8201dSApple OSS Distributions 		 * the thread lock for the thread being UNBLOCKED
1954*fdd8201dSApple OSS Distributions 		 * is also held
1955*fdd8201dSApple OSS Distributions 		 */
1956*fdd8201dSApple OSS Distributions 		old_thactive = _wq_thactive_inc(wq, qos);
1957*fdd8201dSApple OSS Distributions 		if (__improbable(kdebug_enable)) {
1958*fdd8201dSApple OSS Distributions 			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
1959*fdd8201dSApple OSS Distributions 			    old_thactive, qos, NULL, NULL);
1960*fdd8201dSApple OSS Distributions 			req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
1961*fdd8201dSApple OSS Distributions 			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
1962*fdd8201dSApple OSS Distributions 			    old + 1, qos | (req_qos << 8),
1963*fdd8201dSApple OSS Distributions 			    wq->wq_threads_scheduled);
1964*fdd8201dSApple OSS Distributions 		}
1965*fdd8201dSApple OSS Distributions 		break;
1966*fdd8201dSApple OSS Distributions 	}
1967*fdd8201dSApple OSS Distributions }
1968*fdd8201dSApple OSS Distributions 
1969*fdd8201dSApple OSS Distributions #pragma mark workq lifecycle
1970*fdd8201dSApple OSS Distributions 
1971*fdd8201dSApple OSS Distributions void
workq_reference(struct workqueue * wq)1972*fdd8201dSApple OSS Distributions workq_reference(struct workqueue *wq)
1973*fdd8201dSApple OSS Distributions {
1974*fdd8201dSApple OSS Distributions 	os_ref_retain(&wq->wq_refcnt);
1975*fdd8201dSApple OSS Distributions }
1976*fdd8201dSApple OSS Distributions 
1977*fdd8201dSApple OSS Distributions static void
workq_deallocate_queue_invoke(mpsc_queue_chain_t e,__assert_only mpsc_daemon_queue_t dq)1978*fdd8201dSApple OSS Distributions workq_deallocate_queue_invoke(mpsc_queue_chain_t e,
1979*fdd8201dSApple OSS Distributions     __assert_only mpsc_daemon_queue_t dq)
1980*fdd8201dSApple OSS Distributions {
1981*fdd8201dSApple OSS Distributions 	struct workqueue *wq;
1982*fdd8201dSApple OSS Distributions 	struct turnstile *ts;
1983*fdd8201dSApple OSS Distributions 
1984*fdd8201dSApple OSS Distributions 	wq = mpsc_queue_element(e, struct workqueue, wq_destroy_link);
1985*fdd8201dSApple OSS Distributions 	assert(dq == &workq_deallocate_queue);
1986*fdd8201dSApple OSS Distributions 
1987*fdd8201dSApple OSS Distributions 	turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts, TURNSTILE_WORKQS);
1988*fdd8201dSApple OSS Distributions 	assert(ts);
1989*fdd8201dSApple OSS Distributions 	turnstile_cleanup();
1990*fdd8201dSApple OSS Distributions 	turnstile_deallocate(ts);
1991*fdd8201dSApple OSS Distributions 
1992*fdd8201dSApple OSS Distributions 	lck_ticket_destroy(&wq->wq_lock, &workq_lck_grp);
1993*fdd8201dSApple OSS Distributions 	zfree(workq_zone_workqueue, wq);
1994*fdd8201dSApple OSS Distributions }
1995*fdd8201dSApple OSS Distributions 
1996*fdd8201dSApple OSS Distributions static void
workq_deallocate(struct workqueue * wq)1997*fdd8201dSApple OSS Distributions workq_deallocate(struct workqueue *wq)
1998*fdd8201dSApple OSS Distributions {
1999*fdd8201dSApple OSS Distributions 	if (os_ref_release_relaxed(&wq->wq_refcnt) == 0) {
2000*fdd8201dSApple OSS Distributions 		workq_deallocate_queue_invoke(&wq->wq_destroy_link,
2001*fdd8201dSApple OSS Distributions 		    &workq_deallocate_queue);
2002*fdd8201dSApple OSS Distributions 	}
2003*fdd8201dSApple OSS Distributions }
2004*fdd8201dSApple OSS Distributions 
2005*fdd8201dSApple OSS Distributions void
workq_deallocate_safe(struct workqueue * wq)2006*fdd8201dSApple OSS Distributions workq_deallocate_safe(struct workqueue *wq)
2007*fdd8201dSApple OSS Distributions {
2008*fdd8201dSApple OSS Distributions 	if (__improbable(os_ref_release_relaxed(&wq->wq_refcnt) == 0)) {
2009*fdd8201dSApple OSS Distributions 		mpsc_daemon_enqueue(&workq_deallocate_queue, &wq->wq_destroy_link,
2010*fdd8201dSApple OSS Distributions 		    MPSC_QUEUE_DISABLE_PREEMPTION);
2011*fdd8201dSApple OSS Distributions 	}
2012*fdd8201dSApple OSS Distributions }
2013*fdd8201dSApple OSS Distributions 
2014*fdd8201dSApple OSS Distributions /**
2015*fdd8201dSApple OSS Distributions  * Setup per-process state for the workqueue.
2016*fdd8201dSApple OSS Distributions  */
2017*fdd8201dSApple OSS Distributions int
workq_open(struct proc * p,__unused struct workq_open_args * uap,__unused int32_t * retval)2018*fdd8201dSApple OSS Distributions workq_open(struct proc *p, __unused struct workq_open_args *uap,
2019*fdd8201dSApple OSS Distributions     __unused int32_t *retval)
2020*fdd8201dSApple OSS Distributions {
2021*fdd8201dSApple OSS Distributions 	struct workqueue *wq;
2022*fdd8201dSApple OSS Distributions 	int error = 0;
2023*fdd8201dSApple OSS Distributions 
2024*fdd8201dSApple OSS Distributions 	if ((p->p_lflag & P_LREGISTER) == 0) {
2025*fdd8201dSApple OSS Distributions 		return EINVAL;
2026*fdd8201dSApple OSS Distributions 	}
2027*fdd8201dSApple OSS Distributions 
2028*fdd8201dSApple OSS Distributions 	if (wq_init_constrained_limit) {
2029*fdd8201dSApple OSS Distributions 		uint32_t limit, num_cpus = ml_wait_max_cpus();
2030*fdd8201dSApple OSS Distributions 
2031*fdd8201dSApple OSS Distributions 		/*
2032*fdd8201dSApple OSS Distributions 		 * set up the limit for the constrained pool
2033*fdd8201dSApple OSS Distributions 		 * this is a virtual pool in that we don't
2034*fdd8201dSApple OSS Distributions 		 * maintain it on a separate idle and run list
2035*fdd8201dSApple OSS Distributions 		 */
2036*fdd8201dSApple OSS Distributions 		limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR;
2037*fdd8201dSApple OSS Distributions 
2038*fdd8201dSApple OSS Distributions 		if (limit > wq_max_constrained_threads) {
2039*fdd8201dSApple OSS Distributions 			wq_max_constrained_threads = limit;
2040*fdd8201dSApple OSS Distributions 		}
2041*fdd8201dSApple OSS Distributions 
2042*fdd8201dSApple OSS Distributions 		if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
2043*fdd8201dSApple OSS Distributions 			wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
2044*fdd8201dSApple OSS Distributions 		}
2045*fdd8201dSApple OSS Distributions 		if (wq_max_threads > CONFIG_THREAD_MAX - 20) {
2046*fdd8201dSApple OSS Distributions 			wq_max_threads = CONFIG_THREAD_MAX - 20;
2047*fdd8201dSApple OSS Distributions 		}
2048*fdd8201dSApple OSS Distributions 
2049*fdd8201dSApple OSS Distributions 		wq_death_max_load = (uint16_t)fls(num_cpus) + 1;
2050*fdd8201dSApple OSS Distributions 
2051*fdd8201dSApple OSS Distributions 		for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) {
2052*fdd8201dSApple OSS Distributions 			wq_max_parallelism[_wq_bucket(qos)] =
2053*fdd8201dSApple OSS Distributions 			    qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
2054*fdd8201dSApple OSS Distributions 		}
2055*fdd8201dSApple OSS Distributions 
2056*fdd8201dSApple OSS Distributions 		wq_max_cooperative_threads = num_cpus;
2057*fdd8201dSApple OSS Distributions 
2058*fdd8201dSApple OSS Distributions 		wq_init_constrained_limit = 0;
2059*fdd8201dSApple OSS Distributions 	}
2060*fdd8201dSApple OSS Distributions 
2061*fdd8201dSApple OSS Distributions 	if (proc_get_wqptr(p) == NULL) {
2062*fdd8201dSApple OSS Distributions 		if (proc_init_wqptr_or_wait(p) == FALSE) {
2063*fdd8201dSApple OSS Distributions 			assert(proc_get_wqptr(p) != NULL);
2064*fdd8201dSApple OSS Distributions 			goto out;
2065*fdd8201dSApple OSS Distributions 		}
2066*fdd8201dSApple OSS Distributions 
2067*fdd8201dSApple OSS Distributions 		wq = zalloc_flags(workq_zone_workqueue, Z_WAITOK | Z_ZERO);
2068*fdd8201dSApple OSS Distributions 
2069*fdd8201dSApple OSS Distributions 		os_ref_init_count(&wq->wq_refcnt, &workq_refgrp, 1);
2070*fdd8201dSApple OSS Distributions 
2071*fdd8201dSApple OSS Distributions 		// Start the event manager at the priority hinted at by the policy engine
2072*fdd8201dSApple OSS Distributions 		thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
2073*fdd8201dSApple OSS Distributions 		pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
2074*fdd8201dSApple OSS Distributions 		wq->wq_event_manager_priority = (uint32_t)pp;
2075*fdd8201dSApple OSS Distributions 		wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
2076*fdd8201dSApple OSS Distributions 		wq->wq_proc = p;
2077*fdd8201dSApple OSS Distributions 		turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
2078*fdd8201dSApple OSS Distributions 		    TURNSTILE_WORKQS);
2079*fdd8201dSApple OSS Distributions 
2080*fdd8201dSApple OSS Distributions 		TAILQ_INIT(&wq->wq_thrunlist);
2081*fdd8201dSApple OSS Distributions 		TAILQ_INIT(&wq->wq_thnewlist);
2082*fdd8201dSApple OSS Distributions 		TAILQ_INIT(&wq->wq_thidlelist);
2083*fdd8201dSApple OSS Distributions 		priority_queue_init(&wq->wq_overcommit_queue);
2084*fdd8201dSApple OSS Distributions 		priority_queue_init(&wq->wq_constrained_queue);
2085*fdd8201dSApple OSS Distributions 		priority_queue_init(&wq->wq_special_queue);
2086*fdd8201dSApple OSS Distributions 		for (int bucket = 0; bucket < WORKQ_NUM_QOS_BUCKETS; bucket++) {
2087*fdd8201dSApple OSS Distributions 			STAILQ_INIT(&wq->wq_cooperative_queue[bucket]);
2088*fdd8201dSApple OSS Distributions 		}
2089*fdd8201dSApple OSS Distributions 
2090*fdd8201dSApple OSS Distributions 		/* We are only using the delayed thread call for the constrained pool
2091*fdd8201dSApple OSS Distributions 		 * which can't have work at >= UI QoS and so we can be fine with a
2092*fdd8201dSApple OSS Distributions 		 * UI QoS thread call.
2093*fdd8201dSApple OSS Distributions 		 */
2094*fdd8201dSApple OSS Distributions 		wq->wq_delayed_call = thread_call_allocate_with_qos(
2095*fdd8201dSApple OSS Distributions 			workq_add_new_threads_call, p, THREAD_QOS_USER_INTERACTIVE,
2096*fdd8201dSApple OSS Distributions 			THREAD_CALL_OPTIONS_ONCE);
2097*fdd8201dSApple OSS Distributions 		wq->wq_immediate_call = thread_call_allocate_with_options(
2098*fdd8201dSApple OSS Distributions 			workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
2099*fdd8201dSApple OSS Distributions 			THREAD_CALL_OPTIONS_ONCE);
2100*fdd8201dSApple OSS Distributions 		wq->wq_death_call = thread_call_allocate_with_options(
2101*fdd8201dSApple OSS Distributions 			workq_kill_old_threads_call, wq,
2102*fdd8201dSApple OSS Distributions 			THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);
2103*fdd8201dSApple OSS Distributions 
2104*fdd8201dSApple OSS Distributions 		lck_ticket_init(&wq->wq_lock, &workq_lck_grp);
2105*fdd8201dSApple OSS Distributions 
2106*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
2107*fdd8201dSApple OSS Distributions 		    VM_KERNEL_ADDRHIDE(wq), 0, 0);
2108*fdd8201dSApple OSS Distributions 		proc_set_wqptr(p, wq);
2109*fdd8201dSApple OSS Distributions 	}
2110*fdd8201dSApple OSS Distributions out:
2111*fdd8201dSApple OSS Distributions 
2112*fdd8201dSApple OSS Distributions 	return error;
2113*fdd8201dSApple OSS Distributions }
2114*fdd8201dSApple OSS Distributions 
2115*fdd8201dSApple OSS Distributions /*
2116*fdd8201dSApple OSS Distributions  * Routine:	workq_mark_exiting
2117*fdd8201dSApple OSS Distributions  *
2118*fdd8201dSApple OSS Distributions  * Function:	Mark the work queue such that new threads will not be added to the
2119*fdd8201dSApple OSS Distributions  *		work queue after we return.
2120*fdd8201dSApple OSS Distributions  *
2121*fdd8201dSApple OSS Distributions  * Conditions:	Called against the current process.
2122*fdd8201dSApple OSS Distributions  */
2123*fdd8201dSApple OSS Distributions void
workq_mark_exiting(struct proc * p)2124*fdd8201dSApple OSS Distributions workq_mark_exiting(struct proc *p)
2125*fdd8201dSApple OSS Distributions {
2126*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
2127*fdd8201dSApple OSS Distributions 	uint32_t wq_flags;
2128*fdd8201dSApple OSS Distributions 	workq_threadreq_t mgr_req;
2129*fdd8201dSApple OSS Distributions 
2130*fdd8201dSApple OSS Distributions 	if (!wq) {
2131*fdd8201dSApple OSS Distributions 		return;
2132*fdd8201dSApple OSS Distributions 	}
2133*fdd8201dSApple OSS Distributions 
2134*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0);
2135*fdd8201dSApple OSS Distributions 
2136*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
2137*fdd8201dSApple OSS Distributions 
2138*fdd8201dSApple OSS Distributions 	wq_flags = os_atomic_or_orig(&wq->wq_flags, WQ_EXITING, relaxed);
2139*fdd8201dSApple OSS Distributions 	if (__improbable(wq_flags & WQ_EXITING)) {
2140*fdd8201dSApple OSS Distributions 		panic("workq_mark_exiting called twice");
2141*fdd8201dSApple OSS Distributions 	}
2142*fdd8201dSApple OSS Distributions 
2143*fdd8201dSApple OSS Distributions 	/*
2144*fdd8201dSApple OSS Distributions 	 * Opportunistically try to cancel thread calls that are likely in flight.
2145*fdd8201dSApple OSS Distributions 	 * workq_exit() will do the proper cleanup.
2146*fdd8201dSApple OSS Distributions 	 */
2147*fdd8201dSApple OSS Distributions 	if (wq_flags & WQ_IMMEDIATE_CALL_SCHEDULED) {
2148*fdd8201dSApple OSS Distributions 		thread_call_cancel(wq->wq_immediate_call);
2149*fdd8201dSApple OSS Distributions 	}
2150*fdd8201dSApple OSS Distributions 	if (wq_flags & WQ_DELAYED_CALL_SCHEDULED) {
2151*fdd8201dSApple OSS Distributions 		thread_call_cancel(wq->wq_delayed_call);
2152*fdd8201dSApple OSS Distributions 	}
2153*fdd8201dSApple OSS Distributions 	if (wq_flags & WQ_DEATH_CALL_SCHEDULED) {
2154*fdd8201dSApple OSS Distributions 		thread_call_cancel(wq->wq_death_call);
2155*fdd8201dSApple OSS Distributions 	}
2156*fdd8201dSApple OSS Distributions 
2157*fdd8201dSApple OSS Distributions 	mgr_req = wq->wq_event_manager_threadreq;
2158*fdd8201dSApple OSS Distributions 	wq->wq_event_manager_threadreq = NULL;
2159*fdd8201dSApple OSS Distributions 	wq->wq_reqcount = 0; /* workq_schedule_creator must not look at queues */
2160*fdd8201dSApple OSS Distributions 	wq->wq_creator = NULL;
2161*fdd8201dSApple OSS Distributions 	workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
2162*fdd8201dSApple OSS Distributions 
2163*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
2164*fdd8201dSApple OSS Distributions 
2165*fdd8201dSApple OSS Distributions 	if (mgr_req) {
2166*fdd8201dSApple OSS Distributions 		kqueue_threadreq_cancel(p, mgr_req);
2167*fdd8201dSApple OSS Distributions 	}
2168*fdd8201dSApple OSS Distributions 	/*
2169*fdd8201dSApple OSS Distributions 	 * No one touches the priority queues once WQ_EXITING is set.
2170*fdd8201dSApple OSS Distributions 	 * It is hence safe to do the tear down without holding any lock.
2171*fdd8201dSApple OSS Distributions 	 */
2172*fdd8201dSApple OSS Distributions 	priority_queue_destroy(&wq->wq_overcommit_queue,
2173*fdd8201dSApple OSS Distributions 	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2174*fdd8201dSApple OSS Distributions 		workq_threadreq_destroy(p, e);
2175*fdd8201dSApple OSS Distributions 	});
2176*fdd8201dSApple OSS Distributions 	priority_queue_destroy(&wq->wq_constrained_queue,
2177*fdd8201dSApple OSS Distributions 	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2178*fdd8201dSApple OSS Distributions 		workq_threadreq_destroy(p, e);
2179*fdd8201dSApple OSS Distributions 	});
2180*fdd8201dSApple OSS Distributions 	priority_queue_destroy(&wq->wq_special_queue,
2181*fdd8201dSApple OSS Distributions 	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2182*fdd8201dSApple OSS Distributions 		workq_threadreq_destroy(p, e);
2183*fdd8201dSApple OSS Distributions 	});
2184*fdd8201dSApple OSS Distributions 
2185*fdd8201dSApple OSS Distributions 	WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0);
2186*fdd8201dSApple OSS Distributions }
2187*fdd8201dSApple OSS Distributions 
2188*fdd8201dSApple OSS Distributions /*
2189*fdd8201dSApple OSS Distributions  * Routine:	workq_exit
2190*fdd8201dSApple OSS Distributions  *
2191*fdd8201dSApple OSS Distributions  * Function:	clean up the work queue structure(s) now that there are no threads
2192*fdd8201dSApple OSS Distributions  *		left running inside the work queue (except possibly current_thread).
2193*fdd8201dSApple OSS Distributions  *
2194*fdd8201dSApple OSS Distributions  * Conditions:	Called by the last thread in the process.
2195*fdd8201dSApple OSS Distributions  *		Called against current process.
2196*fdd8201dSApple OSS Distributions  */
2197*fdd8201dSApple OSS Distributions void
workq_exit(struct proc * p)2198*fdd8201dSApple OSS Distributions workq_exit(struct proc *p)
2199*fdd8201dSApple OSS Distributions {
2200*fdd8201dSApple OSS Distributions 	struct workqueue *wq;
2201*fdd8201dSApple OSS Distributions 	struct uthread *uth, *tmp;
2202*fdd8201dSApple OSS Distributions 
2203*fdd8201dSApple OSS Distributions 	wq = os_atomic_xchg(&p->p_wqptr, NULL, relaxed);
2204*fdd8201dSApple OSS Distributions 	if (wq != NULL) {
2205*fdd8201dSApple OSS Distributions 		thread_t th = current_thread();
2206*fdd8201dSApple OSS Distributions 
2207*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0);
2208*fdd8201dSApple OSS Distributions 
2209*fdd8201dSApple OSS Distributions 		if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
2210*fdd8201dSApple OSS Distributions 			/*
2211*fdd8201dSApple OSS Distributions 			 * <rdar://problem/40111515> Make sure we will no longer call the
2212*fdd8201dSApple OSS Distributions 			 * sched call, if we ever block this thread, which the cancel_wait
2213*fdd8201dSApple OSS Distributions 			 * below can do.
2214*fdd8201dSApple OSS Distributions 			 */
2215*fdd8201dSApple OSS Distributions 			thread_sched_call(th, NULL);
2216*fdd8201dSApple OSS Distributions 		}
2217*fdd8201dSApple OSS Distributions 
2218*fdd8201dSApple OSS Distributions 		/*
2219*fdd8201dSApple OSS Distributions 		 * Thread calls are always scheduled by the proc itself or under the
2220*fdd8201dSApple OSS Distributions 		 * workqueue spinlock if WQ_EXITING is not yet set.
2221*fdd8201dSApple OSS Distributions 		 *
2222*fdd8201dSApple OSS Distributions 		 * Either way, when this runs, the proc has no threads left beside
2223*fdd8201dSApple OSS Distributions 		 * the one running this very code, so we know no thread call can be
2224*fdd8201dSApple OSS Distributions 		 * dispatched anymore.
2225*fdd8201dSApple OSS Distributions 		 */
2226*fdd8201dSApple OSS Distributions 		thread_call_cancel_wait(wq->wq_delayed_call);
2227*fdd8201dSApple OSS Distributions 		thread_call_cancel_wait(wq->wq_immediate_call);
2228*fdd8201dSApple OSS Distributions 		thread_call_cancel_wait(wq->wq_death_call);
2229*fdd8201dSApple OSS Distributions 		thread_call_free(wq->wq_delayed_call);
2230*fdd8201dSApple OSS Distributions 		thread_call_free(wq->wq_immediate_call);
2231*fdd8201dSApple OSS Distributions 		thread_call_free(wq->wq_death_call);
2232*fdd8201dSApple OSS Distributions 
2233*fdd8201dSApple OSS Distributions 		/*
2234*fdd8201dSApple OSS Distributions 		 * Clean up workqueue data structures for threads that exited and
2235*fdd8201dSApple OSS Distributions 		 * didn't get a chance to clean up after themselves.
2236*fdd8201dSApple OSS Distributions 		 *
2237*fdd8201dSApple OSS Distributions 		 * idle/new threads should have been interrupted and died on their own
2238*fdd8201dSApple OSS Distributions 		 */
2239*fdd8201dSApple OSS Distributions 		TAILQ_FOREACH_SAFE(uth, &wq->wq_thrunlist, uu_workq_entry, tmp) {
2240*fdd8201dSApple OSS Distributions 			thread_t mth = get_machthread(uth);
2241*fdd8201dSApple OSS Distributions 			thread_sched_call(mth, NULL);
2242*fdd8201dSApple OSS Distributions 			thread_deallocate(mth);
2243*fdd8201dSApple OSS Distributions 		}
2244*fdd8201dSApple OSS Distributions 		assert(TAILQ_EMPTY(&wq->wq_thnewlist));
2245*fdd8201dSApple OSS Distributions 		assert(TAILQ_EMPTY(&wq->wq_thidlelist));
2246*fdd8201dSApple OSS Distributions 
2247*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq,
2248*fdd8201dSApple OSS Distributions 		    VM_KERNEL_ADDRHIDE(wq), 0, 0);
2249*fdd8201dSApple OSS Distributions 
2250*fdd8201dSApple OSS Distributions 		workq_deallocate(wq);
2251*fdd8201dSApple OSS Distributions 
2252*fdd8201dSApple OSS Distributions 		WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0);
2253*fdd8201dSApple OSS Distributions 	}
2254*fdd8201dSApple OSS Distributions }
2255*fdd8201dSApple OSS Distributions 
2256*fdd8201dSApple OSS Distributions 
2257*fdd8201dSApple OSS Distributions #pragma mark bsd thread control
2258*fdd8201dSApple OSS Distributions 
2259*fdd8201dSApple OSS Distributions bool
bsdthread_part_of_cooperative_workqueue(struct uthread * uth)2260*fdd8201dSApple OSS Distributions bsdthread_part_of_cooperative_workqueue(struct uthread *uth)
2261*fdd8201dSApple OSS Distributions {
2262*fdd8201dSApple OSS Distributions 	return (workq_thread_is_cooperative(uth) || workq_thread_is_nonovercommit(uth)) &&
2263*fdd8201dSApple OSS Distributions 	       (uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER);
2264*fdd8201dSApple OSS Distributions }
2265*fdd8201dSApple OSS Distributions 
2266*fdd8201dSApple OSS Distributions static bool
_pthread_priority_to_policy(pthread_priority_t priority,thread_qos_policy_data_t * data)2267*fdd8201dSApple OSS Distributions _pthread_priority_to_policy(pthread_priority_t priority,
2268*fdd8201dSApple OSS Distributions     thread_qos_policy_data_t *data)
2269*fdd8201dSApple OSS Distributions {
2270*fdd8201dSApple OSS Distributions 	data->qos_tier = _pthread_priority_thread_qos(priority);
2271*fdd8201dSApple OSS Distributions 	data->tier_importance = _pthread_priority_relpri(priority);
2272*fdd8201dSApple OSS Distributions 	if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 ||
2273*fdd8201dSApple OSS Distributions 	    data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
2274*fdd8201dSApple OSS Distributions 		return false;
2275*fdd8201dSApple OSS Distributions 	}
2276*fdd8201dSApple OSS Distributions 	return true;
2277*fdd8201dSApple OSS Distributions }
2278*fdd8201dSApple OSS Distributions 
2279*fdd8201dSApple OSS Distributions static int
bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority,
    mach_port_name_t voucher, enum workq_set_self_flags flags)
{
	/*
	 * Perform up to four independent operations on thread `th`, selected
	 * by `flags`: kevent unbind, QoS change, voucher adoption, and
	 * fixed-priority/timeshare scheduling policy.  Each stage records its
	 * own error and falls through to the next stage; precedence between
	 * the collected errors is resolved under the `done` label.
	 */
	struct uthread *uth = get_bsdthread_info(th);
	struct workqueue *wq = proc_get_wqptr(p);

	kern_return_t kr;
	int unbind_rv = 0, qos_rv = 0, voucher_rv = 0, fixedpri_rv = 0;
	bool is_wq_thread = (thread_get_tag(th) & THREAD_TAG_WORKQUEUE);

	if (flags & WORKQ_SET_SELF_WQ_KEVENT_UNBIND) {
		/* Only non-manager workqueue threads may unbind. */
		if (!is_wq_thread) {
			unbind_rv = EINVAL;
			goto qos;
		}

		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			unbind_rv = EINVAL;
			goto qos;
		}

		workq_threadreq_t kqr = uth->uu_kqr_bound;
		if (kqr == NULL) {
			/* Not currently bound to any kqueue request. */
			unbind_rv = EALREADY;
			goto qos;
		}

		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			/* Workloop bindings cannot be severed through this path. */
			unbind_rv = EINVAL;
			goto qos;
		}

		kqueue_threadreq_unbind(p, kqr);
	}

qos:
	if (flags & WORKQ_SET_SELF_QOS_FLAG) {
		thread_qos_policy_data_t new_policy;

		if (!_pthread_priority_to_policy(priority, &new_policy)) {
			qos_rv = EINVAL;
			goto voucher;
		}

		if (!is_wq_thread) {
			/*
			 * Threads opted out of QoS can't change QoS
			 */
			if (!thread_has_qos_policy(th)) {
				qos_rv = EPERM;
				goto voucher;
			}
		} else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER ||
		    uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_ABOVEUI) {
			/*
			 * Workqueue manager threads or threads above UI can't change QoS
			 */
			qos_rv = EINVAL;
			goto voucher;
		} else {
			/*
			 * For workqueue threads, possibly adjust buckets and redrive thread
			 * requests.
			 *
			 * Transitions allowed:
			 *
			 * overcommit --> non-overcommit
			 * overcommit --> overcommit
			 * non-overcommit --> non-overcommit
			 * non-overcommit --> overcommit (to be deprecated later)
			 * cooperative --> cooperative
			 *
			 * All other transitions aren't allowed so reject them.
			 */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_cooperative(uth) && !_pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}

			struct uu_workq_policy old_pri, new_pri;
			bool force_run = false;

			workq_lock_spin(wq);

			old_pri = new_pri = uth->uu_workq_pri;
			new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;

			/* Adjust schedule counts for various types of transitions */

			/* overcommit -> non-overcommit */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_nonovercommit(priority)) {
				workq_thread_set_type(uth, 0);
				wq->wq_constrained_threads_scheduled++;

				/* non-overcommit -> overcommit */
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_overcommit(priority)) {
				workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
				/*
				 * Dropping below the constrained-thread cap may allow a
				 * pending constrained request to run again.
				 */
				force_run = (wq->wq_constrained_threads_scheduled-- == wq_max_constrained_threads);

				/* cooperative -> cooperative */
			} else if (workq_thread_is_cooperative(uth)) {
				_wq_cooperative_queue_scheduled_count_dec(wq, old_pri.qos_bucket);
				_wq_cooperative_queue_scheduled_count_inc(wq, workq_pri_bucket(new_pri));

				/* We're changing schedule counts within cooperative pool, we
				 * need to refresh best cooperative QoS logic again */
				force_run = _wq_cooperative_queue_refresh_best_req_qos(wq);
			}

			/* This will also call schedule_creator if needed */
			workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
			workq_unlock(wq);

			if (workq_thread_is_overcommit(uth)) {
				thread_disarm_workqueue_quantum(th);
			} else {
				/* If the thread changed QoS buckets, the quantum duration
				 * may have changed too */
				thread_arm_workqueue_quantum(th);
			}
		}

		kr = thread_policy_set_internal(th, THREAD_QOS_POLICY,
		    (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			qos_rv = EINVAL;
		}
	}

voucher:
	if (flags & WORKQ_SET_SELF_VOUCHER_FLAG) {
		kr = thread_set_voucher_name(voucher);
		if (kr != KERN_SUCCESS) {
			voucher_rv = ENOENT;
			goto fixedpri;
		}
	}

fixedpri:
	/* Skip scheduling-policy changes when the QoS stage already failed. */
	if (qos_rv) {
		goto done;
	}
	if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 0};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	} else if (flags & WORKQ_SET_SELF_TIMESHARE_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 1};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	}

done:
	if (qos_rv && voucher_rv) {
		/* Both failed, give that a unique error. */
		return EBADMSG;
	}

	if (unbind_rv) {
		return unbind_rv;
	}

	if (qos_rv) {
		return qos_rv;
	}

	if (voucher_rv) {
		return voucher_rv;
	}

	if (fixedpri_rv) {
		return fixedpri_rv;
	}


	return 0;
}
2485*fdd8201dSApple OSS Distributions 
2486*fdd8201dSApple OSS Distributions static int
bsdthread_add_explicit_override(proc_t p,mach_port_name_t kport,pthread_priority_t pp,user_addr_t resource)2487*fdd8201dSApple OSS Distributions bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport,
2488*fdd8201dSApple OSS Distributions     pthread_priority_t pp, user_addr_t resource)
2489*fdd8201dSApple OSS Distributions {
2490*fdd8201dSApple OSS Distributions 	thread_qos_t qos = _pthread_priority_thread_qos(pp);
2491*fdd8201dSApple OSS Distributions 	if (qos == THREAD_QOS_UNSPECIFIED) {
2492*fdd8201dSApple OSS Distributions 		return EINVAL;
2493*fdd8201dSApple OSS Distributions 	}
2494*fdd8201dSApple OSS Distributions 
2495*fdd8201dSApple OSS Distributions 	thread_t th = port_name_to_thread(kport,
2496*fdd8201dSApple OSS Distributions 	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2497*fdd8201dSApple OSS Distributions 	if (th == THREAD_NULL) {
2498*fdd8201dSApple OSS Distributions 		return ESRCH;
2499*fdd8201dSApple OSS Distributions 	}
2500*fdd8201dSApple OSS Distributions 
2501*fdd8201dSApple OSS Distributions 	int rv = proc_thread_qos_add_override(p->task, th, 0, qos, TRUE,
2502*fdd8201dSApple OSS Distributions 	    resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2503*fdd8201dSApple OSS Distributions 
2504*fdd8201dSApple OSS Distributions 	thread_deallocate(th);
2505*fdd8201dSApple OSS Distributions 	return rv;
2506*fdd8201dSApple OSS Distributions }
2507*fdd8201dSApple OSS Distributions 
2508*fdd8201dSApple OSS Distributions static int
bsdthread_remove_explicit_override(proc_t p,mach_port_name_t kport,user_addr_t resource)2509*fdd8201dSApple OSS Distributions bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport,
2510*fdd8201dSApple OSS Distributions     user_addr_t resource)
2511*fdd8201dSApple OSS Distributions {
2512*fdd8201dSApple OSS Distributions 	thread_t th = port_name_to_thread(kport,
2513*fdd8201dSApple OSS Distributions 	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2514*fdd8201dSApple OSS Distributions 	if (th == THREAD_NULL) {
2515*fdd8201dSApple OSS Distributions 		return ESRCH;
2516*fdd8201dSApple OSS Distributions 	}
2517*fdd8201dSApple OSS Distributions 
2518*fdd8201dSApple OSS Distributions 	int rv = proc_thread_qos_remove_override(p->task, th, 0, resource,
2519*fdd8201dSApple OSS Distributions 	    THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2520*fdd8201dSApple OSS Distributions 
2521*fdd8201dSApple OSS Distributions 	thread_deallocate(th);
2522*fdd8201dSApple OSS Distributions 	return rv;
2523*fdd8201dSApple OSS Distributions }
2524*fdd8201dSApple OSS Distributions 
/*
 * Apply a dispatch QoS override (derived from `pp`) to the workqueue
 * thread identified by `kport`.  When `ulock_addr` is non-zero, the
 * override is skipped if the ulock at that address is no longer owned
 * by the target thread (the override would be stale).  Once the target
 * is known to be a workqueue thread, this always returns 0: applying
 * the override is best-effort.
 */
static int
workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport,
    pthread_priority_t pp, user_addr_t ulock_addr)
{
	struct uu_workq_policy old_pri, new_pri;
	struct workqueue *wq = proc_get_wqptr(p);

	thread_qos_t qos_override = _pthread_priority_thread_qos(pp);
	if (qos_override == THREAD_QOS_UNSPECIFIED) {
		return EINVAL;
	}

	thread_t thread = port_name_to_thread(kport,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
	if (thread == THREAD_NULL) {
		return ESRCH;
	}

	struct uthread *uth = get_bsdthread_info(thread);
	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
		/* Dispatch overrides only apply to workqueue threads. */
		thread_deallocate(thread);
		return EPERM;
	}

	WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE,
	    wq, thread_tid(thread), 1, pp);

	thread_mtx_lock(thread);

	if (ulock_addr) {
		uint32_t val;
		int rc;
		/*
		 * Workaround lack of explicit support for 'no-fault copyin'
		 * <rdar://problem/24999882>, as disabling preemption prevents paging in
		 */
		disable_preemption();
		rc = copyin_atomic32(ulock_addr, &val);
		enable_preemption();
		if (rc == 0 && ulock_owner_value_to_port_name(val) != kport) {
			/*
			 * Ulock changed owners: the override is stale, skip it.
			 * A copyin failure (rc != 0) leaves the owner unknown and
			 * the override is applied anyway (best effort).
			 */
			goto out;
		}
	}

	workq_lock_spin(wq);

	old_pri = uth->uu_workq_pri;
	if (old_pri.qos_override >= qos_override) {
		/* Nothing to do */
	} else if (thread == current_thread()) {
		/* Self-override: rebalance buckets and redrive requests. */
		new_pri = old_pri;
		new_pri.qos_override = qos_override;
		workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	} else {
		/*
		 * Remote thread: record the override and only poke the
		 * scheduler when it raises the effective override.
		 */
		uth->uu_workq_pri.qos_override = qos_override;
		if (qos_override > workq_pri_override(old_pri)) {
			thread_set_workq_override(thread, qos_override);
		}
	}

	workq_unlock(wq);

out:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return 0;
}
2592*fdd8201dSApple OSS Distributions 
2593*fdd8201dSApple OSS Distributions static int
workq_thread_reset_dispatch_override(proc_t p,thread_t thread)2594*fdd8201dSApple OSS Distributions workq_thread_reset_dispatch_override(proc_t p, thread_t thread)
2595*fdd8201dSApple OSS Distributions {
2596*fdd8201dSApple OSS Distributions 	struct uu_workq_policy old_pri, new_pri;
2597*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
2598*fdd8201dSApple OSS Distributions 	struct uthread *uth = get_bsdthread_info(thread);
2599*fdd8201dSApple OSS Distributions 
2600*fdd8201dSApple OSS Distributions 	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
2601*fdd8201dSApple OSS Distributions 		return EPERM;
2602*fdd8201dSApple OSS Distributions 	}
2603*fdd8201dSApple OSS Distributions 
2604*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_NONE, wq, 0, 0, 0);
2605*fdd8201dSApple OSS Distributions 
2606*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
2607*fdd8201dSApple OSS Distributions 	old_pri = new_pri = uth->uu_workq_pri;
2608*fdd8201dSApple OSS Distributions 	new_pri.qos_override = THREAD_QOS_UNSPECIFIED;
2609*fdd8201dSApple OSS Distributions 	workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
2610*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
2611*fdd8201dSApple OSS Distributions 	return 0;
2612*fdd8201dSApple OSS Distributions }
2613*fdd8201dSApple OSS Distributions 
2614*fdd8201dSApple OSS Distributions static int
workq_thread_allow_kill(__unused proc_t p,thread_t thread,bool enable)2615*fdd8201dSApple OSS Distributions workq_thread_allow_kill(__unused proc_t p, thread_t thread, bool enable)
2616*fdd8201dSApple OSS Distributions {
2617*fdd8201dSApple OSS Distributions 	if (!(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE)) {
2618*fdd8201dSApple OSS Distributions 		// If the thread isn't a workqueue thread, don't set the
2619*fdd8201dSApple OSS Distributions 		// kill_allowed bit; however, we still need to return 0
2620*fdd8201dSApple OSS Distributions 		// instead of an error code since this code is executed
2621*fdd8201dSApple OSS Distributions 		// on the abort path which needs to not depend on the
2622*fdd8201dSApple OSS Distributions 		// pthread_t (returning an error depends on pthread_t via
2623*fdd8201dSApple OSS Distributions 		// cerror_nocancel)
2624*fdd8201dSApple OSS Distributions 		return 0;
2625*fdd8201dSApple OSS Distributions 	}
2626*fdd8201dSApple OSS Distributions 	struct uthread *uth = get_bsdthread_info(thread);
2627*fdd8201dSApple OSS Distributions 	uth->uu_workq_pthread_kill_allowed = enable;
2628*fdd8201dSApple OSS Distributions 	return 0;
2629*fdd8201dSApple OSS Distributions }
2630*fdd8201dSApple OSS Distributions 
2631*fdd8201dSApple OSS Distributions static int
bsdthread_get_max_parallelism(thread_qos_t qos,unsigned long flags,int * retval)2632*fdd8201dSApple OSS Distributions bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags,
2633*fdd8201dSApple OSS Distributions     int *retval)
2634*fdd8201dSApple OSS Distributions {
2635*fdd8201dSApple OSS Distributions 	static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
2636*fdd8201dSApple OSS Distributions 	    _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
2637*fdd8201dSApple OSS Distributions 	static_assert(QOS_PARALLELISM_REALTIME ==
2638*fdd8201dSApple OSS Distributions 	    _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
2639*fdd8201dSApple OSS Distributions 	static_assert(QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE ==
2640*fdd8201dSApple OSS Distributions 	    _PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC, "cluster shared resource");
2641*fdd8201dSApple OSS Distributions 
2642*fdd8201dSApple OSS Distributions 	if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL | QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE)) {
2643*fdd8201dSApple OSS Distributions 		return EINVAL;
2644*fdd8201dSApple OSS Distributions 	}
2645*fdd8201dSApple OSS Distributions 
2646*fdd8201dSApple OSS Distributions 	/* No units are present */
2647*fdd8201dSApple OSS Distributions 	if (flags & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) {
2648*fdd8201dSApple OSS Distributions 		return ENOTSUP;
2649*fdd8201dSApple OSS Distributions 	}
2650*fdd8201dSApple OSS Distributions 
2651*fdd8201dSApple OSS Distributions 	if (flags & QOS_PARALLELISM_REALTIME) {
2652*fdd8201dSApple OSS Distributions 		if (qos) {
2653*fdd8201dSApple OSS Distributions 			return EINVAL;
2654*fdd8201dSApple OSS Distributions 		}
2655*fdd8201dSApple OSS Distributions 	} else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
2656*fdd8201dSApple OSS Distributions 		return EINVAL;
2657*fdd8201dSApple OSS Distributions 	}
2658*fdd8201dSApple OSS Distributions 
2659*fdd8201dSApple OSS Distributions 	*retval = qos_max_parallelism(qos, flags);
2660*fdd8201dSApple OSS Distributions 	return 0;
2661*fdd8201dSApple OSS Distributions }
2662*fdd8201dSApple OSS Distributions 
2663*fdd8201dSApple OSS Distributions static int
bsdthread_dispatch_apply_attr(__unused struct proc * p,thread_t thread,unsigned long flags,uint64_t value1,__unused uint64_t value2)2664*fdd8201dSApple OSS Distributions bsdthread_dispatch_apply_attr(__unused struct proc *p, thread_t thread,
2665*fdd8201dSApple OSS Distributions     unsigned long flags, uint64_t value1, __unused uint64_t value2)
2666*fdd8201dSApple OSS Distributions {
2667*fdd8201dSApple OSS Distributions 	uint32_t apply_worker_index;
2668*fdd8201dSApple OSS Distributions 	kern_return_t kr;
2669*fdd8201dSApple OSS Distributions 
2670*fdd8201dSApple OSS Distributions 	switch (flags) {
2671*fdd8201dSApple OSS Distributions 	case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET:
2672*fdd8201dSApple OSS Distributions 		apply_worker_index = (uint32_t)value1;
2673*fdd8201dSApple OSS Distributions 		kr = thread_shared_rsrc_policy_set(thread, apply_worker_index, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2674*fdd8201dSApple OSS Distributions 		/*
2675*fdd8201dSApple OSS Distributions 		 * KERN_INVALID_POLICY indicates that the thread was trying to bind to a
2676*fdd8201dSApple OSS Distributions 		 * cluster which it was not eligible to execute on.
2677*fdd8201dSApple OSS Distributions 		 */
2678*fdd8201dSApple OSS Distributions 		return (kr == KERN_SUCCESS) ? 0 : ((kr == KERN_INVALID_POLICY) ? ENOTSUP : EINVAL);
2679*fdd8201dSApple OSS Distributions 	case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR:
2680*fdd8201dSApple OSS Distributions 		kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2681*fdd8201dSApple OSS Distributions 		return (kr == KERN_SUCCESS) ? 0 : EINVAL;
2682*fdd8201dSApple OSS Distributions 	default:
2683*fdd8201dSApple OSS Distributions 		return EINVAL;
2684*fdd8201dSApple OSS Distributions 	}
2685*fdd8201dSApple OSS Distributions }
2686*fdd8201dSApple OSS Distributions 
/*
 * Reject the syscall with EINVAL when an argument slot that must be
 * zero carries a value.  NOTE(review): expands to a `return` from the
 * enclosing function — only safe in int-returning syscall handlers.
 */
#define ENSURE_UNUSED(arg) \
	        ({ if ((arg) != 0) { return EINVAL; } })
2689*fdd8201dSApple OSS Distributions 
/*
 * bsdthread_ctl system call: dispatches the pthread control operations
 * onto the helpers above based on uap->cmd.  Unused argument slots are
 * validated with ENSURE_UNUSED (which returns EINVAL on violation).
 */
int
bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
{
	switch (uap->cmd) {
	case BSDTHREAD_CTL_QOS_OVERRIDE_START:
		return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1,
		           (pthread_priority_t)uap->arg2, uap->arg3);
	case BSDTHREAD_CTL_QOS_OVERRIDE_END:
		ENSURE_UNUSED(uap->arg3);
		return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1,
		           (user_addr_t)uap->arg2);

	case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
		return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1,
		           (pthread_priority_t)uap->arg2, uap->arg3);
	case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
		/* Reset only ever applies to the calling thread. */
		return workq_thread_reset_dispatch_override(p, current_thread());

	case BSDTHREAD_CTL_SET_SELF:
		return bsdthread_set_self(p, current_thread(),
		           (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
		           (enum workq_set_self_flags)uap->arg3);

	case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
		ENSURE_UNUSED(uap->arg3);
		return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1,
		           (unsigned long)uap->arg2, retval);
	case BSDTHREAD_CTL_WORKQ_ALLOW_KILL:
		ENSURE_UNUSED(uap->arg2);
		ENSURE_UNUSED(uap->arg3);
		return workq_thread_allow_kill(p, current_thread(), (bool)uap->arg1);
	case BSDTHREAD_CTL_DISPATCH_APPLY_ATTR:
		return bsdthread_dispatch_apply_attr(p, current_thread(),
		           (unsigned long)uap->arg1, (uint64_t)uap->arg2,
		           (uint64_t)uap->arg3);
	case BSDTHREAD_CTL_SET_QOS:
	case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
	case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
		/* no longer supported */
		return ENOTSUP;

	default:
		return EINVAL;
	}
}
2735*fdd8201dSApple OSS Distributions 
2736*fdd8201dSApple OSS Distributions #pragma mark workqueue thread manipulation
2737*fdd8201dSApple OSS Distributions 
2738*fdd8201dSApple OSS Distributions static void __dead2
2739*fdd8201dSApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2740*fdd8201dSApple OSS Distributions     struct uthread *uth, uint32_t setup_flags);
2741*fdd8201dSApple OSS Distributions 
2742*fdd8201dSApple OSS Distributions static void __dead2
2743*fdd8201dSApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2744*fdd8201dSApple OSS Distributions     struct uthread *uth, uint32_t setup_flags);
2745*fdd8201dSApple OSS Distributions 
2746*fdd8201dSApple OSS Distributions static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2;
2747*fdd8201dSApple OSS Distributions 
2748*fdd8201dSApple OSS Distributions #if KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD
2749*fdd8201dSApple OSS Distributions static inline uint64_t
workq_trace_req_id(workq_threadreq_t req)2750*fdd8201dSApple OSS Distributions workq_trace_req_id(workq_threadreq_t req)
2751*fdd8201dSApple OSS Distributions {
2752*fdd8201dSApple OSS Distributions 	struct kqworkloop *kqwl;
2753*fdd8201dSApple OSS Distributions 	if (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
2754*fdd8201dSApple OSS Distributions 		kqwl = __container_of(req, struct kqworkloop, kqwl_request);
2755*fdd8201dSApple OSS Distributions 		return kqwl->kqwl_dynamicid;
2756*fdd8201dSApple OSS Distributions 	}
2757*fdd8201dSApple OSS Distributions 
2758*fdd8201dSApple OSS Distributions 	return VM_KERNEL_ADDRHIDE(req);
2759*fdd8201dSApple OSS Distributions }
2760*fdd8201dSApple OSS Distributions #endif
2761*fdd8201dSApple OSS Distributions 
2762*fdd8201dSApple OSS Distributions /**
2763*fdd8201dSApple OSS Distributions  * Entry point for libdispatch to ask for threads
2764*fdd8201dSApple OSS Distributions  */
2765*fdd8201dSApple OSS Distributions static int
workq_reqthreads(struct proc * p,uint32_t reqcount,pthread_priority_t pp,bool cooperative)2766*fdd8201dSApple OSS Distributions workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp, bool cooperative)
2767*fdd8201dSApple OSS Distributions {
2768*fdd8201dSApple OSS Distributions 	thread_qos_t qos = _pthread_priority_thread_qos(pp);
2769*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
2770*fdd8201dSApple OSS Distributions 	uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;
2771*fdd8201dSApple OSS Distributions 	int ret = 0;
2772*fdd8201dSApple OSS Distributions 
2773*fdd8201dSApple OSS Distributions 	if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
2774*fdd8201dSApple OSS Distributions 	    qos == THREAD_QOS_UNSPECIFIED) {
2775*fdd8201dSApple OSS Distributions 		ret = EINVAL;
2776*fdd8201dSApple OSS Distributions 		goto exit;
2777*fdd8201dSApple OSS Distributions 	}
2778*fdd8201dSApple OSS Distributions 
2779*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
2780*fdd8201dSApple OSS Distributions 	    wq, reqcount, pp, cooperative);
2781*fdd8201dSApple OSS Distributions 
2782*fdd8201dSApple OSS Distributions 	workq_threadreq_t req = zalloc(workq_zone_threadreq);
2783*fdd8201dSApple OSS Distributions 	priority_queue_entry_init(&req->tr_entry);
2784*fdd8201dSApple OSS Distributions 	req->tr_state = WORKQ_TR_STATE_NEW;
2785*fdd8201dSApple OSS Distributions 	req->tr_qos   = qos;
2786*fdd8201dSApple OSS Distributions 	workq_tr_flags_t tr_flags = 0;
2787*fdd8201dSApple OSS Distributions 
2788*fdd8201dSApple OSS Distributions 	if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
2789*fdd8201dSApple OSS Distributions 		tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
2790*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
2791*fdd8201dSApple OSS Distributions 	}
2792*fdd8201dSApple OSS Distributions 
2793*fdd8201dSApple OSS Distributions 	if (cooperative) {
2794*fdd8201dSApple OSS Distributions 		tr_flags |= WORKQ_TR_FLAG_COOPERATIVE;
2795*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
2796*fdd8201dSApple OSS Distributions 
2797*fdd8201dSApple OSS Distributions 		if (reqcount > 1) {
2798*fdd8201dSApple OSS Distributions 			ret = ENOTSUP;
2799*fdd8201dSApple OSS Distributions 			goto free_and_exit;
2800*fdd8201dSApple OSS Distributions 		}
2801*fdd8201dSApple OSS Distributions 	}
2802*fdd8201dSApple OSS Distributions 
2803*fdd8201dSApple OSS Distributions 	/* A thread request cannot be both overcommit and cooperative */
2804*fdd8201dSApple OSS Distributions 	if (workq_tr_is_cooperative(tr_flags) &&
2805*fdd8201dSApple OSS Distributions 	    workq_tr_is_overcommit(tr_flags)) {
2806*fdd8201dSApple OSS Distributions 		ret = EINVAL;
2807*fdd8201dSApple OSS Distributions 		goto free_and_exit;
2808*fdd8201dSApple OSS Distributions 	}
2809*fdd8201dSApple OSS Distributions 	req->tr_flags = tr_flags;
2810*fdd8201dSApple OSS Distributions 
2811*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
2812*fdd8201dSApple OSS Distributions 	    wq, workq_trace_req_id(req), req->tr_qos, reqcount);
2813*fdd8201dSApple OSS Distributions 
2814*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
2815*fdd8201dSApple OSS Distributions 	do {
2816*fdd8201dSApple OSS Distributions 		if (_wq_exiting(wq)) {
2817*fdd8201dSApple OSS Distributions 			goto unlock_and_exit;
2818*fdd8201dSApple OSS Distributions 		}
2819*fdd8201dSApple OSS Distributions 
2820*fdd8201dSApple OSS Distributions 		/*
2821*fdd8201dSApple OSS Distributions 		 * When userspace is asking for parallelism, wakeup up to (reqcount - 1)
2822*fdd8201dSApple OSS Distributions 		 * threads without pacing, to inform the scheduler of that workload.
2823*fdd8201dSApple OSS Distributions 		 *
2824*fdd8201dSApple OSS Distributions 		 * The last requests, or the ones that failed the admission checks are
2825*fdd8201dSApple OSS Distributions 		 * enqueued and go through the regular creator codepath.
2826*fdd8201dSApple OSS Distributions 		 *
2827*fdd8201dSApple OSS Distributions 		 * If there aren't enough threads, add one, but re-evaluate everything
2828*fdd8201dSApple OSS Distributions 		 * as conditions may now have changed.
2829*fdd8201dSApple OSS Distributions 		 */
2830*fdd8201dSApple OSS Distributions 		unpaced = reqcount - 1;
2831*fdd8201dSApple OSS Distributions 
2832*fdd8201dSApple OSS Distributions 		if (reqcount > 1) {
2833*fdd8201dSApple OSS Distributions 			/* We don't handle asking for parallelism on the cooperative
2834*fdd8201dSApple OSS Distributions 			 * workqueue just yet */
2835*fdd8201dSApple OSS Distributions 			assert(!workq_threadreq_is_cooperative(req));
2836*fdd8201dSApple OSS Distributions 
2837*fdd8201dSApple OSS Distributions 			if (workq_threadreq_is_nonovercommit(req)) {
2838*fdd8201dSApple OSS Distributions 				unpaced = workq_constrained_allowance(wq, qos, NULL, false);
2839*fdd8201dSApple OSS Distributions 				if (unpaced >= reqcount - 1) {
2840*fdd8201dSApple OSS Distributions 					unpaced = reqcount - 1;
2841*fdd8201dSApple OSS Distributions 				}
2842*fdd8201dSApple OSS Distributions 			}
2843*fdd8201dSApple OSS Distributions 		}
2844*fdd8201dSApple OSS Distributions 
2845*fdd8201dSApple OSS Distributions 		/*
2846*fdd8201dSApple OSS Distributions 		 * This path does not currently handle custom workloop parameters
2847*fdd8201dSApple OSS Distributions 		 * when creating threads for parallelism.
2848*fdd8201dSApple OSS Distributions 		 */
2849*fdd8201dSApple OSS Distributions 		assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));
2850*fdd8201dSApple OSS Distributions 
2851*fdd8201dSApple OSS Distributions 		/*
2852*fdd8201dSApple OSS Distributions 		 * This is a trimmed down version of workq_threadreq_bind_and_unlock()
2853*fdd8201dSApple OSS Distributions 		 */
2854*fdd8201dSApple OSS Distributions 		while (unpaced > 0 && wq->wq_thidlecount) {
2855*fdd8201dSApple OSS Distributions 			struct uthread *uth;
2856*fdd8201dSApple OSS Distributions 			bool needs_wakeup;
2857*fdd8201dSApple OSS Distributions 			uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;
2858*fdd8201dSApple OSS Distributions 
2859*fdd8201dSApple OSS Distributions 			if (workq_tr_is_overcommit(req->tr_flags)) {
2860*fdd8201dSApple OSS Distributions 				uu_flags |= UT_WORKQ_OVERCOMMIT;
2861*fdd8201dSApple OSS Distributions 			}
2862*fdd8201dSApple OSS Distributions 
2863*fdd8201dSApple OSS Distributions 			uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);
2864*fdd8201dSApple OSS Distributions 
2865*fdd8201dSApple OSS Distributions 			_wq_thactive_inc(wq, qos);
2866*fdd8201dSApple OSS Distributions 			wq->wq_thscheduled_count[_wq_bucket(qos)]++;
2867*fdd8201dSApple OSS Distributions 			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
2868*fdd8201dSApple OSS Distributions 			wq->wq_fulfilled++;
2869*fdd8201dSApple OSS Distributions 
2870*fdd8201dSApple OSS Distributions 			uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
2871*fdd8201dSApple OSS Distributions 			uth->uu_save.uus_workq_park_data.thread_request = req;
2872*fdd8201dSApple OSS Distributions 			if (needs_wakeup) {
2873*fdd8201dSApple OSS Distributions 				workq_thread_wakeup(uth);
2874*fdd8201dSApple OSS Distributions 			}
2875*fdd8201dSApple OSS Distributions 			unpaced--;
2876*fdd8201dSApple OSS Distributions 			reqcount--;
2877*fdd8201dSApple OSS Distributions 		}
2878*fdd8201dSApple OSS Distributions 	} while (unpaced && wq->wq_nthreads < wq_max_threads &&
2879*fdd8201dSApple OSS Distributions 	    workq_add_new_idle_thread(p, wq));
2880*fdd8201dSApple OSS Distributions 
2881*fdd8201dSApple OSS Distributions 	if (_wq_exiting(wq)) {
2882*fdd8201dSApple OSS Distributions 		goto unlock_and_exit;
2883*fdd8201dSApple OSS Distributions 	}
2884*fdd8201dSApple OSS Distributions 
2885*fdd8201dSApple OSS Distributions 	req->tr_count = (uint16_t)reqcount;
2886*fdd8201dSApple OSS Distributions 	if (workq_threadreq_enqueue(wq, req)) {
2887*fdd8201dSApple OSS Distributions 		/* This can drop the workqueue lock, and take it again */
2888*fdd8201dSApple OSS Distributions 		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
2889*fdd8201dSApple OSS Distributions 	}
2890*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
2891*fdd8201dSApple OSS Distributions 	return 0;
2892*fdd8201dSApple OSS Distributions 
2893*fdd8201dSApple OSS Distributions unlock_and_exit:
2894*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
2895*fdd8201dSApple OSS Distributions free_and_exit:
2896*fdd8201dSApple OSS Distributions 	zfree(workq_zone_threadreq, req);
2897*fdd8201dSApple OSS Distributions exit:
2898*fdd8201dSApple OSS Distributions 	return ret;
2899*fdd8201dSApple OSS Distributions }
2900*fdd8201dSApple OSS Distributions 
2901*fdd8201dSApple OSS Distributions bool
workq_kern_threadreq_initiate(struct proc * p,workq_threadreq_t req,struct turnstile * workloop_ts,thread_qos_t qos,workq_kern_threadreq_flags_t flags)2902*fdd8201dSApple OSS Distributions workq_kern_threadreq_initiate(struct proc *p, workq_threadreq_t req,
2903*fdd8201dSApple OSS Distributions     struct turnstile *workloop_ts, thread_qos_t qos,
2904*fdd8201dSApple OSS Distributions     workq_kern_threadreq_flags_t flags)
2905*fdd8201dSApple OSS Distributions {
2906*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr_fast(p);
2907*fdd8201dSApple OSS Distributions 	struct uthread *uth = NULL;
2908*fdd8201dSApple OSS Distributions 
2909*fdd8201dSApple OSS Distributions 	assert(req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT));
2910*fdd8201dSApple OSS Distributions 
2911*fdd8201dSApple OSS Distributions 	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
2912*fdd8201dSApple OSS Distributions 		workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
2913*fdd8201dSApple OSS Distributions 		qos = thread_workq_qos_for_pri(trp.trp_pri);
2914*fdd8201dSApple OSS Distributions 		if (qos == THREAD_QOS_UNSPECIFIED) {
2915*fdd8201dSApple OSS Distributions 			qos = WORKQ_THREAD_QOS_ABOVEUI;
2916*fdd8201dSApple OSS Distributions 		}
2917*fdd8201dSApple OSS Distributions 	}
2918*fdd8201dSApple OSS Distributions 
2919*fdd8201dSApple OSS Distributions 	assert(req->tr_state == WORKQ_TR_STATE_IDLE);
2920*fdd8201dSApple OSS Distributions 	priority_queue_entry_init(&req->tr_entry);
2921*fdd8201dSApple OSS Distributions 	req->tr_count = 1;
2922*fdd8201dSApple OSS Distributions 	req->tr_state = WORKQ_TR_STATE_NEW;
2923*fdd8201dSApple OSS Distributions 	req->tr_qos   = qos;
2924*fdd8201dSApple OSS Distributions 
2925*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq,
2926*fdd8201dSApple OSS Distributions 	    workq_trace_req_id(req), qos, 1);
2927*fdd8201dSApple OSS Distributions 
2928*fdd8201dSApple OSS Distributions 	if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) {
2929*fdd8201dSApple OSS Distributions 		/*
2930*fdd8201dSApple OSS Distributions 		 * we're called back synchronously from the context of
2931*fdd8201dSApple OSS Distributions 		 * kqueue_threadreq_unbind from within workq_thread_return()
2932*fdd8201dSApple OSS Distributions 		 * we can try to match up this thread with this request !
2933*fdd8201dSApple OSS Distributions 		 */
2934*fdd8201dSApple OSS Distributions 		uth = current_uthread();
2935*fdd8201dSApple OSS Distributions 		assert(uth->uu_kqr_bound == NULL);
2936*fdd8201dSApple OSS Distributions 	}
2937*fdd8201dSApple OSS Distributions 
2938*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
2939*fdd8201dSApple OSS Distributions 	if (_wq_exiting(wq)) {
2940*fdd8201dSApple OSS Distributions 		req->tr_state = WORKQ_TR_STATE_IDLE;
2941*fdd8201dSApple OSS Distributions 		workq_unlock(wq);
2942*fdd8201dSApple OSS Distributions 		return false;
2943*fdd8201dSApple OSS Distributions 	}
2944*fdd8201dSApple OSS Distributions 
2945*fdd8201dSApple OSS Distributions 	if (uth && workq_threadreq_admissible(wq, uth, req)) {
2946*fdd8201dSApple OSS Distributions 		/* This is the case of the rebind - we were about to park and unbind
2947*fdd8201dSApple OSS Distributions 		 * when more events came so keep the binding.
2948*fdd8201dSApple OSS Distributions 		 */
2949*fdd8201dSApple OSS Distributions 		assert(uth != wq->wq_creator);
2950*fdd8201dSApple OSS Distributions 
2951*fdd8201dSApple OSS Distributions 		if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
2952*fdd8201dSApple OSS Distributions 			_wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
2953*fdd8201dSApple OSS Distributions 			workq_thread_reset_pri(wq, uth, req, /*unpark*/ false);
2954*fdd8201dSApple OSS Distributions 		}
2955*fdd8201dSApple OSS Distributions 		/*
2956*fdd8201dSApple OSS Distributions 		 * We're called from workq_kern_threadreq_initiate()
2957*fdd8201dSApple OSS Distributions 		 * due to an unbind, with the kq req held.
2958*fdd8201dSApple OSS Distributions 		 */
2959*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
2960*fdd8201dSApple OSS Distributions 		    workq_trace_req_id(req), req->tr_flags, 0);
2961*fdd8201dSApple OSS Distributions 		wq->wq_fulfilled++;
2962*fdd8201dSApple OSS Distributions 
2963*fdd8201dSApple OSS Distributions 		kqueue_threadreq_bind(p, req, get_machthread(uth), 0);
2964*fdd8201dSApple OSS Distributions 	} else {
2965*fdd8201dSApple OSS Distributions 		if (workloop_ts) {
2966*fdd8201dSApple OSS Distributions 			workq_perform_turnstile_operation_locked(wq, ^{
2967*fdd8201dSApple OSS Distributions 				turnstile_update_inheritor(workloop_ts, wq->wq_turnstile,
2968*fdd8201dSApple OSS Distributions 				TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
2969*fdd8201dSApple OSS Distributions 				turnstile_update_inheritor_complete(workloop_ts,
2970*fdd8201dSApple OSS Distributions 				TURNSTILE_INTERLOCK_HELD);
2971*fdd8201dSApple OSS Distributions 			});
2972*fdd8201dSApple OSS Distributions 		}
2973*fdd8201dSApple OSS Distributions 
2974*fdd8201dSApple OSS Distributions 		bool reevaluate_creator_thread_group = false;
2975*fdd8201dSApple OSS Distributions #if CONFIG_PREADOPT_TG
2976*fdd8201dSApple OSS Distributions 		reevaluate_creator_thread_group = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
2977*fdd8201dSApple OSS Distributions #endif
2978*fdd8201dSApple OSS Distributions 		/* We enqueued the highest priority item or we may need to reevaluate if
2979*fdd8201dSApple OSS Distributions 		 * the creator needs a thread group pre-adoption */
2980*fdd8201dSApple OSS Distributions 		if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_thread_group) {
2981*fdd8201dSApple OSS Distributions 			workq_schedule_creator(p, wq, flags);
2982*fdd8201dSApple OSS Distributions 		}
2983*fdd8201dSApple OSS Distributions 	}
2984*fdd8201dSApple OSS Distributions 
2985*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
2986*fdd8201dSApple OSS Distributions 
2987*fdd8201dSApple OSS Distributions 	return true;
2988*fdd8201dSApple OSS Distributions }
2989*fdd8201dSApple OSS Distributions 
2990*fdd8201dSApple OSS Distributions void
workq_kern_threadreq_modify(struct proc * p,workq_threadreq_t req,thread_qos_t qos,workq_kern_threadreq_flags_t flags)2991*fdd8201dSApple OSS Distributions workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
2992*fdd8201dSApple OSS Distributions     thread_qos_t qos, workq_kern_threadreq_flags_t flags)
2993*fdd8201dSApple OSS Distributions {
2994*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr_fast(p);
2995*fdd8201dSApple OSS Distributions 	bool make_overcommit = false;
2996*fdd8201dSApple OSS Distributions 
2997*fdd8201dSApple OSS Distributions 	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
2998*fdd8201dSApple OSS Distributions 		/* Requests outside-of-QoS shouldn't accept modify operations */
2999*fdd8201dSApple OSS Distributions 		return;
3000*fdd8201dSApple OSS Distributions 	}
3001*fdd8201dSApple OSS Distributions 
3002*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
3003*fdd8201dSApple OSS Distributions 
3004*fdd8201dSApple OSS Distributions 	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3005*fdd8201dSApple OSS Distributions 	assert(req->tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP));
3006*fdd8201dSApple OSS Distributions 
3007*fdd8201dSApple OSS Distributions 	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
3008*fdd8201dSApple OSS Distributions 		kqueue_threadreq_bind(p, req, req->tr_thread, 0);
3009*fdd8201dSApple OSS Distributions 		workq_unlock(wq);
3010*fdd8201dSApple OSS Distributions 		return;
3011*fdd8201dSApple OSS Distributions 	}
3012*fdd8201dSApple OSS Distributions 
3013*fdd8201dSApple OSS Distributions 	if (flags & WORKQ_THREADREQ_MAKE_OVERCOMMIT) {
3014*fdd8201dSApple OSS Distributions 		/* TODO (rokhinip): We come into this code path for kqwl thread
3015*fdd8201dSApple OSS Distributions 		 * requests. kqwl requests cannot be cooperative.
3016*fdd8201dSApple OSS Distributions 		 */
3017*fdd8201dSApple OSS Distributions 		assert(!workq_threadreq_is_cooperative(req));
3018*fdd8201dSApple OSS Distributions 
3019*fdd8201dSApple OSS Distributions 		make_overcommit = workq_threadreq_is_nonovercommit(req);
3020*fdd8201dSApple OSS Distributions 	}
3021*fdd8201dSApple OSS Distributions 
3022*fdd8201dSApple OSS Distributions 	if (_wq_exiting(wq) || (req->tr_qos == qos && !make_overcommit)) {
3023*fdd8201dSApple OSS Distributions 		workq_unlock(wq);
3024*fdd8201dSApple OSS Distributions 		return;
3025*fdd8201dSApple OSS Distributions 	}
3026*fdd8201dSApple OSS Distributions 
3027*fdd8201dSApple OSS Distributions 	assert(req->tr_count == 1);
3028*fdd8201dSApple OSS Distributions 	if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
3029*fdd8201dSApple OSS Distributions 		panic("Invalid thread request (%p) state %d", req, req->tr_state);
3030*fdd8201dSApple OSS Distributions 	}
3031*fdd8201dSApple OSS Distributions 
3032*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
3033*fdd8201dSApple OSS Distributions 	    workq_trace_req_id(req), qos, 0);
3034*fdd8201dSApple OSS Distributions 
3035*fdd8201dSApple OSS Distributions 	struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
3036*fdd8201dSApple OSS Distributions 	workq_threadreq_t req_max;
3037*fdd8201dSApple OSS Distributions 
3038*fdd8201dSApple OSS Distributions 	/*
3039*fdd8201dSApple OSS Distributions 	 * Stage 1: Dequeue the request from its priority queue.
3040*fdd8201dSApple OSS Distributions 	 *
3041*fdd8201dSApple OSS Distributions 	 * If we dequeue the root item of the constrained priority queue,
3042*fdd8201dSApple OSS Distributions 	 * maintain the best constrained request qos invariant.
3043*fdd8201dSApple OSS Distributions 	 */
3044*fdd8201dSApple OSS Distributions 	if (priority_queue_remove(pq, &req->tr_entry)) {
3045*fdd8201dSApple OSS Distributions 		if (workq_threadreq_is_nonovercommit(req)) {
3046*fdd8201dSApple OSS Distributions 			_wq_thactive_refresh_best_constrained_req_qos(wq);
3047*fdd8201dSApple OSS Distributions 		}
3048*fdd8201dSApple OSS Distributions 	}
3049*fdd8201dSApple OSS Distributions 
3050*fdd8201dSApple OSS Distributions 	/*
3051*fdd8201dSApple OSS Distributions 	 * Stage 2: Apply changes to the thread request
3052*fdd8201dSApple OSS Distributions 	 *
3053*fdd8201dSApple OSS Distributions 	 * If the item will not become the root of the priority queue it belongs to,
3054*fdd8201dSApple OSS Distributions 	 * then we need to wait in line, just enqueue and return quickly.
3055*fdd8201dSApple OSS Distributions 	 */
3056*fdd8201dSApple OSS Distributions 	if (__improbable(make_overcommit)) {
3057*fdd8201dSApple OSS Distributions 		req->tr_flags ^= WORKQ_TR_FLAG_OVERCOMMIT;
3058*fdd8201dSApple OSS Distributions 		pq = workq_priority_queue_for_req(wq, req);
3059*fdd8201dSApple OSS Distributions 	}
3060*fdd8201dSApple OSS Distributions 	req->tr_qos = qos;
3061*fdd8201dSApple OSS Distributions 
3062*fdd8201dSApple OSS Distributions 	req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
3063*fdd8201dSApple OSS Distributions 	if (req_max && req_max->tr_qos >= qos) {
3064*fdd8201dSApple OSS Distributions 		priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
3065*fdd8201dSApple OSS Distributions 		    workq_priority_for_req(req), false);
3066*fdd8201dSApple OSS Distributions 		priority_queue_insert(pq, &req->tr_entry);
3067*fdd8201dSApple OSS Distributions 		workq_unlock(wq);
3068*fdd8201dSApple OSS Distributions 		return;
3069*fdd8201dSApple OSS Distributions 	}
3070*fdd8201dSApple OSS Distributions 
3071*fdd8201dSApple OSS Distributions 	/*
3072*fdd8201dSApple OSS Distributions 	 * Stage 3: Reevaluate whether we should run the thread request.
3073*fdd8201dSApple OSS Distributions 	 *
3074*fdd8201dSApple OSS Distributions 	 * Pretend the thread request is new again:
3075*fdd8201dSApple OSS Distributions 	 * - adjust wq_reqcount to not count it anymore.
3076*fdd8201dSApple OSS Distributions 	 * - make its state WORKQ_TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
3077*fdd8201dSApple OSS Distributions 	 *   properly attempts a synchronous bind)
3078*fdd8201dSApple OSS Distributions 	 */
3079*fdd8201dSApple OSS Distributions 	wq->wq_reqcount--;
3080*fdd8201dSApple OSS Distributions 	req->tr_state = WORKQ_TR_STATE_NEW;
3081*fdd8201dSApple OSS Distributions 
3082*fdd8201dSApple OSS Distributions 	/* We enqueued the highest priority item or we may need to reevaluate if
3083*fdd8201dSApple OSS Distributions 	 * the creator needs a thread group pre-adoption if the request got a new TG */
3084*fdd8201dSApple OSS Distributions 	bool reevaluate_creator_tg = false;
3085*fdd8201dSApple OSS Distributions 
3086*fdd8201dSApple OSS Distributions #if CONFIG_PREADOPT_TG
3087*fdd8201dSApple OSS Distributions 	reevaluate_creator_tg = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
3088*fdd8201dSApple OSS Distributions #endif
3089*fdd8201dSApple OSS Distributions 
3090*fdd8201dSApple OSS Distributions 	if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_tg) {
3091*fdd8201dSApple OSS Distributions 		workq_schedule_creator(p, wq, flags);
3092*fdd8201dSApple OSS Distributions 	}
3093*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
3094*fdd8201dSApple OSS Distributions }
3095*fdd8201dSApple OSS Distributions 
3096*fdd8201dSApple OSS Distributions void
workq_kern_threadreq_lock(struct proc * p)3097*fdd8201dSApple OSS Distributions workq_kern_threadreq_lock(struct proc *p)
3098*fdd8201dSApple OSS Distributions {
3099*fdd8201dSApple OSS Distributions 	workq_lock_spin(proc_get_wqptr_fast(p));
3100*fdd8201dSApple OSS Distributions }
3101*fdd8201dSApple OSS Distributions 
3102*fdd8201dSApple OSS Distributions void
workq_kern_threadreq_unlock(struct proc * p)3103*fdd8201dSApple OSS Distributions workq_kern_threadreq_unlock(struct proc *p)
3104*fdd8201dSApple OSS Distributions {
3105*fdd8201dSApple OSS Distributions 	workq_unlock(proc_get_wqptr_fast(p));
3106*fdd8201dSApple OSS Distributions }
3107*fdd8201dSApple OSS Distributions 
3108*fdd8201dSApple OSS Distributions void
workq_kern_threadreq_update_inheritor(struct proc * p,workq_threadreq_t req,thread_t owner,struct turnstile * wl_ts,turnstile_update_flags_t flags)3109*fdd8201dSApple OSS Distributions workq_kern_threadreq_update_inheritor(struct proc *p, workq_threadreq_t req,
3110*fdd8201dSApple OSS Distributions     thread_t owner, struct turnstile *wl_ts,
3111*fdd8201dSApple OSS Distributions     turnstile_update_flags_t flags)
3112*fdd8201dSApple OSS Distributions {
3113*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr_fast(p);
3114*fdd8201dSApple OSS Distributions 	turnstile_inheritor_t inheritor;
3115*fdd8201dSApple OSS Distributions 
3116*fdd8201dSApple OSS Distributions 	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3117*fdd8201dSApple OSS Distributions 	assert(req->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
3118*fdd8201dSApple OSS Distributions 	workq_lock_held(wq);
3119*fdd8201dSApple OSS Distributions 
3120*fdd8201dSApple OSS Distributions 	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
3121*fdd8201dSApple OSS Distributions 		kqueue_threadreq_bind(p, req, req->tr_thread,
3122*fdd8201dSApple OSS Distributions 		    KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE);
3123*fdd8201dSApple OSS Distributions 		return;
3124*fdd8201dSApple OSS Distributions 	}
3125*fdd8201dSApple OSS Distributions 
3126*fdd8201dSApple OSS Distributions 	if (_wq_exiting(wq)) {
3127*fdd8201dSApple OSS Distributions 		inheritor = TURNSTILE_INHERITOR_NULL;
3128*fdd8201dSApple OSS Distributions 	} else {
3129*fdd8201dSApple OSS Distributions 		if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
3130*fdd8201dSApple OSS Distributions 			panic("Invalid thread request (%p) state %d", req, req->tr_state);
3131*fdd8201dSApple OSS Distributions 		}
3132*fdd8201dSApple OSS Distributions 
3133*fdd8201dSApple OSS Distributions 		if (owner) {
3134*fdd8201dSApple OSS Distributions 			inheritor = owner;
3135*fdd8201dSApple OSS Distributions 			flags |= TURNSTILE_INHERITOR_THREAD;
3136*fdd8201dSApple OSS Distributions 		} else {
3137*fdd8201dSApple OSS Distributions 			inheritor = wq->wq_turnstile;
3138*fdd8201dSApple OSS Distributions 			flags |= TURNSTILE_INHERITOR_TURNSTILE;
3139*fdd8201dSApple OSS Distributions 		}
3140*fdd8201dSApple OSS Distributions 	}
3141*fdd8201dSApple OSS Distributions 
3142*fdd8201dSApple OSS Distributions 	workq_perform_turnstile_operation_locked(wq, ^{
3143*fdd8201dSApple OSS Distributions 		turnstile_update_inheritor(wl_ts, inheritor, flags);
3144*fdd8201dSApple OSS Distributions 	});
3145*fdd8201dSApple OSS Distributions }
3146*fdd8201dSApple OSS Distributions 
3147*fdd8201dSApple OSS Distributions void
workq_kern_threadreq_redrive(struct proc * p,workq_kern_threadreq_flags_t flags)3148*fdd8201dSApple OSS Distributions workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags)
3149*fdd8201dSApple OSS Distributions {
3150*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr_fast(p);
3151*fdd8201dSApple OSS Distributions 
3152*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
3153*fdd8201dSApple OSS Distributions 	workq_schedule_creator(p, wq, flags);
3154*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
3155*fdd8201dSApple OSS Distributions }
3156*fdd8201dSApple OSS Distributions 
3157*fdd8201dSApple OSS Distributions /*
3158*fdd8201dSApple OSS Distributions  * Always called at AST by the thread on itself
3159*fdd8201dSApple OSS Distributions  *
3160*fdd8201dSApple OSS Distributions  * Upon quantum expiry, the workqueue subsystem evaluates its state and decides
3161*fdd8201dSApple OSS Distributions  * on what the thread should do next. The TSD value is always set by the thread
3162*fdd8201dSApple OSS Distributions  * on itself in the kernel and cleared either by userspace when it acks the TSD
3163*fdd8201dSApple OSS Distributions  * value and takes action, or by the thread in the kernel when the quantum
3164*fdd8201dSApple OSS Distributions  * expires again.
3165*fdd8201dSApple OSS Distributions  */
3166*fdd8201dSApple OSS Distributions void
workq_kern_quantum_expiry_reevaluate(proc_t proc,thread_t thread)3167*fdd8201dSApple OSS Distributions workq_kern_quantum_expiry_reevaluate(proc_t proc, thread_t thread)
3168*fdd8201dSApple OSS Distributions {
3169*fdd8201dSApple OSS Distributions 	struct uthread *uth = get_bsdthread_info(thread);
3170*fdd8201dSApple OSS Distributions 
3171*fdd8201dSApple OSS Distributions 	if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3172*fdd8201dSApple OSS Distributions 		return;
3173*fdd8201dSApple OSS Distributions 	}
3174*fdd8201dSApple OSS Distributions 
3175*fdd8201dSApple OSS Distributions 	if (!thread_supports_cooperative_workqueue(thread)) {
3176*fdd8201dSApple OSS Distributions 		panic("Quantum expired for thread that doesn't support cooperative workqueue");
3177*fdd8201dSApple OSS Distributions 	}
3178*fdd8201dSApple OSS Distributions 
3179*fdd8201dSApple OSS Distributions 	thread_qos_t qos = uth->uu_workq_pri.qos_bucket;
3180*fdd8201dSApple OSS Distributions 	if (qos == THREAD_QOS_UNSPECIFIED) {
3181*fdd8201dSApple OSS Distributions 		panic("Thread should not have workq bucket of QoS UN");
3182*fdd8201dSApple OSS Distributions 	}
3183*fdd8201dSApple OSS Distributions 
3184*fdd8201dSApple OSS Distributions 	assert(thread_has_expired_workqueue_quantum(thread, false));
3185*fdd8201dSApple OSS Distributions 
3186*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(proc);
3187*fdd8201dSApple OSS Distributions 	assert(wq != NULL);
3188*fdd8201dSApple OSS Distributions 
3189*fdd8201dSApple OSS Distributions 	/*
3190*fdd8201dSApple OSS Distributions 	 * For starters, we're just going to evaluate and see if we need to narrow
3191*fdd8201dSApple OSS Distributions 	 * the pool and tell this thread to park if needed. In the future, we'll
3192*fdd8201dSApple OSS Distributions 	 * evaluate and convey other workqueue state information like needing to
3193*fdd8201dSApple OSS Distributions 	 * pump kevents, etc.
3194*fdd8201dSApple OSS Distributions 	 */
3195*fdd8201dSApple OSS Distributions 	uint64_t flags = 0;
3196*fdd8201dSApple OSS Distributions 
3197*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
3198*fdd8201dSApple OSS Distributions 
3199*fdd8201dSApple OSS Distributions 	if (workq_thread_is_cooperative(uth)) {
3200*fdd8201dSApple OSS Distributions 		if (!workq_cooperative_allowance(wq, qos, uth, false)) {
3201*fdd8201dSApple OSS Distributions 			flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3202*fdd8201dSApple OSS Distributions 		} else {
3203*fdd8201dSApple OSS Distributions 			/* In the future, when we have kevent hookups for the cooperative
3204*fdd8201dSApple OSS Distributions 			 * pool, we need fancier logic for what userspace should do. But
3205*fdd8201dSApple OSS Distributions 			 * right now, only userspace thread requests exist - so we'll just
3206*fdd8201dSApple OSS Distributions 			 * tell userspace to shuffle work items */
3207*fdd8201dSApple OSS Distributions 			flags |= PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE;
3208*fdd8201dSApple OSS Distributions 		}
3209*fdd8201dSApple OSS Distributions 	} else if (workq_thread_is_nonovercommit(uth)) {
3210*fdd8201dSApple OSS Distributions 		if (!workq_constrained_allowance(wq, qos, uth, false)) {
3211*fdd8201dSApple OSS Distributions 			flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3212*fdd8201dSApple OSS Distributions 		}
3213*fdd8201dSApple OSS Distributions 	}
3214*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
3215*fdd8201dSApple OSS Distributions 
3216*fdd8201dSApple OSS Distributions 	WQ_TRACE(TRACE_wq_quantum_expiry_reevaluate, flags, 0, 0, 0);
3217*fdd8201dSApple OSS Distributions 
3218*fdd8201dSApple OSS Distributions 	kevent_set_workq_quantum_expiry_user_tsd(proc, thread, flags);
3219*fdd8201dSApple OSS Distributions 
3220*fdd8201dSApple OSS Distributions 	/* We have conveyed to userspace about what it needs to do upon quantum
3221*fdd8201dSApple OSS Distributions 	 * expiry, now rearm the workqueue quantum again */
3222*fdd8201dSApple OSS Distributions 	thread_arm_workqueue_quantum(get_machthread(uth));
3223*fdd8201dSApple OSS Distributions }
3224*fdd8201dSApple OSS Distributions 
3225*fdd8201dSApple OSS Distributions void
workq_schedule_creator_turnstile_redrive(struct workqueue * wq,bool locked)3226*fdd8201dSApple OSS Distributions workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked)
3227*fdd8201dSApple OSS Distributions {
3228*fdd8201dSApple OSS Distributions 	if (locked) {
3229*fdd8201dSApple OSS Distributions 		workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_NONE);
3230*fdd8201dSApple OSS Distributions 	} else {
3231*fdd8201dSApple OSS Distributions 		workq_schedule_immediate_thread_creation(wq);
3232*fdd8201dSApple OSS Distributions 	}
3233*fdd8201dSApple OSS Distributions }
3234*fdd8201dSApple OSS Distributions 
/*
 * Handle a workqueue thread returning to the kernel
 * (WQOPS_THREAD_RETURN / WQOPS_THREAD_KEVENT_RETURN /
 * WQOPS_THREAD_WORKLOOP_RETURN).
 *
 * If the thread is bound to a thread request (kqr), pending kevents are
 * handed back to userspace via the pthread shims; on success the thread is
 * expected to have unbound.  The thread then either picks up a new thread
 * request or parks — on that path this function does NOT return.
 *
 * p:   owning process
 * uap: syscall args; uap->item is the userspace event list and
 *      uap->affinity the event count on the kevent-return path
 * wq:  the process's workqueue
 *
 * Returns EINVAL for non-workqueue or dying threads (or an event list
 * without a bound kqr), an error from the stack-event handler, or does
 * not return at all.
 */
static int
workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap,
    struct workqueue *wq)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	workq_threadreq_t kqr = uth->uu_kqr_bound;
	workq_threadreq_param_t trp = { };
	int nevents = uap->affinity, error;
	user_addr_t eventlist = uap->item;

	/* Only live workqueue threads may use this path. */
	if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
	    (uth->uu_workq_flags & UT_WORKQ_DYING)) {
		return EINVAL;
	}

	/* An event list makes no sense without a bound thread request. */
	if (eventlist && nevents && kqr == NULL) {
		return EINVAL;
	}

	/* reset signal mask on the workqueue thread to default state */
	if (uth->uu_sigmask != (sigset_t)(~workq_threadmask)) {
		proc_lock(p);
		uth->uu_sigmask = ~workq_threadmask;
		proc_unlock(p);
	}

	if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
		/*
		 * Ensure we store the threadreq param before unbinding
		 * the kqr from this thread.
		 */
		trp = kqueue_threadreq_workloop_param(kqr);
	}

	/*
	 * Freeze the base pri while we decide the fate of this thread.
	 *
	 * Either:
	 * - we return to user and kevent_cleanup will have unfrozen the base pri,
	 * - or we proceed to workq_select_threadreq_or_park_and_unlock() who will.
	 */
	thread_freeze_base_pri(th);

	if (kqr) {
		/*
		 * Build the upcall flags describing this thread (workloop vs
		 * kevent, manager vs QoS/overcommit) for the userspace handler.
		 */
		uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI | WQ_FLAG_THREAD_REUSE;
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
		} else {
			upcall_flags |= WQ_FLAG_THREAD_KEVENT;
		}
		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
		} else {
			if (workq_thread_is_overcommit(uth)) {
				upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
			}
			if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
				upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
			} else {
				upcall_flags |= uth->uu_workq_pri.qos_req |
				    WQ_FLAG_THREAD_PRIO_QOS;
			}
		}
		/* Deliver pending events to the thread's userspace stack. */
		error = pthread_functions->workq_handle_stack_events(p, th,
		    get_task_map(p->task), uth->uu_workq_stackaddr,
		    uth->uu_workq_thport, eventlist, nevents, upcall_flags);
		if (error) {
			/* On error the thread must still be bound to the kqr. */
			assert(uth->uu_kqr_bound == kqr);
			return error;
		}

		// pthread is supposed to pass KEVENT_FLAG_PARKING here
		// which should cause the above call to either:
		// - not return
		// - return an error
		// - return 0 and have unbound properly
		assert(uth->uu_kqr_bound == NULL);
	}

	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, uap->options, 0, 0);

	/* Tear down scheduling callbacks before re-dispatch or park. */
	thread_sched_call(th, NULL);
	thread_will_park_or_terminate(th);
#if CONFIG_WORKLOOP_DEBUG
	UU_KEVENT_HISTORY_WRITE_ENTRY(uth, { .uu_error = -1, });
#endif

	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
	/* Stash the workloop params for reuse if this thread is re-dispatched. */
	uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
	/* Consumes the workq lock; picks up new work or parks — never returns. */
	workq_select_threadreq_or_park_and_unlock(p, wq, uth,
	    WQ_SETUP_CLEAR_VOUCHER);
	__builtin_unreachable();
}
3330*fdd8201dSApple OSS Distributions 
/**
 * Multiplexed call to interact with the workqueue mechanism
 *
 * The operation is selected by uap->options (WQOPS_*); uap->affinity (arg2)
 * and uap->prio (arg3) are reinterpreted per operation — see the per-case
 * comments below.
 *
 * p:      calling process; must have registered (P_LREGISTER)
 * uap:    syscall arguments (options, item, affinity, prio)
 * retval: out parameter; only written by WQOPS_SHOULD_NARROW
 *
 * Returns 0 on success or an errno value (EINVAL, ENOTSUP, ...).  The
 * WQOPS_THREAD_*_RETURN cases may not return at all (the thread parks or
 * is re-dispatched inside workq_thread_return()).
 */
int
workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
{
	int options = uap->options;
	int arg2 = uap->affinity;
	int arg3 = uap->prio;
	struct workqueue *wq = proc_get_wqptr(p);
	int error = 0;

	/* Only processes that registered their threading model may call this. */
	if ((p->p_lflag & P_LREGISTER) == 0) {
		return EINVAL;
	}

	switch (options) {
	case WQOPS_QUEUE_NEWSPISUPP: {
		/*
		 * arg2 = offset of serialno into dispatch queue
		 * arg3 = kevent support
		 */
		int offset = arg2;
		if (arg3 & 0x01) {
			// If we get here, then userspace has indicated support for kevent delivery.
		}

		p->p_dispatchqueue_serialno_offset = (uint64_t)offset;
		break;
	}
	case WQOPS_QUEUE_REQTHREADS: {
		/*
		 * arg2 = number of threads to start
		 * arg3 = priority
		 */
		error = workq_reqthreads(p, arg2, arg3, false);
		break;
	}
	/* For requesting threads for the cooperative pool */
	case WQOPS_QUEUE_REQTHREADS2: {
		/*
		 * arg2 = number of threads to start
		 * arg3 = priority
		 */
		error = workq_reqthreads(p, arg2, arg3, true);
		break;
	}
	case WQOPS_SET_EVENT_MANAGER_PRIORITY: {
		/*
		 * arg2 = priority for the manager thread
		 *
		 * if _PTHREAD_PRIORITY_SCHED_PRI_FLAG is set,
		 * the low bits of the value contains a scheduling priority
		 * instead of a QOS value
		 */
		pthread_priority_t pri = arg2;

		if (wq == NULL) {
			error = EINVAL;
			break;
		}

		/*
		 * Normalize the incoming priority so that it is ordered numerically.
		 */
		if (pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK |
			    _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
		} else {
			/* QoS form: reject an unspecified QoS or out-of-range relpri. */
			thread_qos_t qos = _pthread_priority_thread_qos(pri);
			int relpri = _pthread_priority_relpri(pri);
			if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE ||
			    qos == THREAD_QOS_UNSPECIFIED) {
				error = EINVAL;
				break;
			}
			pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
		}

		/*
		 * If userspace passes a scheduling priority, that wins over any QoS.
		 * Userspace should takes care not to lower the priority this way.
		 */
		workq_lock_spin(wq);
		if (wq->wq_event_manager_priority < (uint32_t)pri) {
			wq->wq_event_manager_priority = (uint32_t)pri;
		}
		workq_unlock(wq);
		break;
	}
	case WQOPS_THREAD_KEVENT_RETURN:
	case WQOPS_THREAD_WORKLOOP_RETURN:
	case WQOPS_THREAD_RETURN: {
		/* May park or re-dispatch the calling thread and never return. */
		error = workq_thread_return(p, uap, wq);
		break;
	}

	case WQOPS_SHOULD_NARROW: {
		/*
		 * arg2 = priority to test
		 * arg3 = unused
		 */
		thread_t th = current_thread();
		struct uthread *uth = get_bsdthread_info(th);
		if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
		    (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) {
			error = EINVAL;
			break;
		}

		thread_qos_t qos = _pthread_priority_thread_qos(arg2);
		if (qos == THREAD_QOS_UNSPECIFIED) {
			error = EINVAL;
			break;
		}
		/*
		 * NOTE(review): unlike SET_EVENT_MANAGER_PRIORITY, wq is not
		 * NULL-checked here — presumably the THREAD_TAG_WORKQUEUE check
		 * above guarantees a workqueue exists; confirm.
		 */
		workq_lock_spin(wq);
		bool should_narrow = !workq_constrained_allowance(wq, qos, uth, false);
		workq_unlock(wq);

		*retval = should_narrow;
		break;
	}
	case WQOPS_SETUP_DISPATCH: {
		/*
		 * item = pointer to workq_dispatch_config structure
		 * arg2 = sizeof(item)
		 */
		struct workq_dispatch_config cfg;
		bzero(&cfg, sizeof(cfg));

		/*
		 * The copy is clamped to sizeof(cfg); a negative arg2 converts
		 * to a huge unsigned long, so MIN() still bounds the copyin.
		 */
		error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
		if (error) {
			break;
		}

		if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
		    cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
			error = ENOTSUP;
			break;
		}

		/* Load fields from version 1 */
		p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;

		/* Load fields from version 2 */
		if (cfg.wdc_version >= 2) {
			p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
		}

		break;
	}
	default:
		error = EINVAL;
		break;
	}

	return error;
}
3489*fdd8201dSApple OSS Distributions 
3490*fdd8201dSApple OSS Distributions /*
3491*fdd8201dSApple OSS Distributions  * We have no work to do, park ourselves on the idle list.
3492*fdd8201dSApple OSS Distributions  *
3493*fdd8201dSApple OSS Distributions  * Consumes the workqueue lock and does not return.
3494*fdd8201dSApple OSS Distributions  */
__attribute__((noreturn, noinline))
static void
workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth,
    uint32_t setup_flags)
{
	assert(uth == current_uthread());
	assert(uth->uu_kqr_bound == NULL);
	workq_push_idle_thread(p, wq, uth, setup_flags); // may not return

	workq_thread_reset_cpupercent(NULL, uth);

#if CONFIG_PREADOPT_TG
	/* Clear the preadoption thread group on the thread.
	 *
	 * Case 1:
	 *		Creator thread which never picked up a thread request. We set a
	 *		preadoption thread group on creator threads but if it never picked
	 *		up a thread request and didn't go to userspace, then the thread will
	 *		park with a preadoption thread group but no explicitly adopted
	 *		voucher or work interval.
	 *
	 *		We drop the preadoption thread group here before proceeding to park.
	 *		Note - we may get preempted when we drop the workq lock below.
	 *
	 * Case 2:
	 *		Thread picked up a thread request and bound to it and returned back
	 *		from userspace and is parking. At this point, preadoption thread
	 *		group should be NULL since the thread has unbound from the thread
	 *		request. So this operation should be a no-op.
	 */
	thread_set_preadopt_thread_group(get_machthread(uth), NULL);
#endif

	/*
	 * Idle cleanup (stack free + voucher drop) requires dropping the workq
	 * lock; skipped for dying threads, which are handled below.
	 */
	if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) &&
	    !(uth->uu_workq_flags & UT_WORKQ_DYING)) {
		workq_unlock(wq);

		/*
		 * workq_push_idle_thread() will unset `has_stack`
		 * if it wants us to free the stack before parking.
		 */
		if (!uth->uu_save.uus_workq_park_data.has_stack) {
			pthread_functions->workq_markfree_threadstack(p,
			    get_machthread(uth), get_task_map(p->task),
			    uth->uu_workq_stackaddr);
		}

		/*
		 * When we remove the voucher from the thread, we may lose our importance
		 * causing us to get preempted, so we do this after putting the thread on
		 * the idle list.  Then, when we get our importance back we'll be able to
		 * use this thread from e.g. the kevent call out to deliver a boosting
		 * message.
		 *
		 * Note that setting the voucher to NULL will not clear the preadoption
		 * thread since this thread could have become the creator again and
		 * perhaps acquired a preadoption thread group.
		 */
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);

		workq_lock_spin(wq);
		uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
		/* Voucher already cleared above; don't do it again on wakeup setup. */
		setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
	}

	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);

	if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
		/*
		 * While we'd dropped the lock to unset our voucher, someone came
		 * around and made us runnable.  But because we weren't waiting on the
		 * event their thread_wakeup() was ineffectual.  To correct for that,
		 * we just run the continuation ourselves.
		 */
		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
		__builtin_unreachable();
	}

	if (uth->uu_workq_flags & UT_WORKQ_DYING) {
		workq_unpark_for_death_and_unlock(p, wq, uth,
		    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, setup_flags);
		__builtin_unreachable();
	}

	/* Disarm the workqueue quantum since the thread is now idle */
	thread_disarm_workqueue_quantum(get_machthread(uth));

	/* Actually park: block interruptibly until a wakeup unparks us. */
	thread_set_pending_block_hint(get_machthread(uth), kThreadWaitParkedWorkQueue);
	assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
	workq_unlock(wq);
	thread_block(workq_unpark_continue);
	__builtin_unreachable();
}
3590*fdd8201dSApple OSS Distributions 
3591*fdd8201dSApple OSS Distributions static inline bool
workq_may_start_event_mgr_thread(struct workqueue * wq,struct uthread * uth)3592*fdd8201dSApple OSS Distributions workq_may_start_event_mgr_thread(struct workqueue *wq, struct uthread *uth)
3593*fdd8201dSApple OSS Distributions {
3594*fdd8201dSApple OSS Distributions 	/*
3595*fdd8201dSApple OSS Distributions 	 * There's an event manager request and either:
3596*fdd8201dSApple OSS Distributions 	 * - no event manager currently running
3597*fdd8201dSApple OSS Distributions 	 * - we are re-using the event manager
3598*fdd8201dSApple OSS Distributions 	 */
3599*fdd8201dSApple OSS Distributions 	return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 ||
3600*fdd8201dSApple OSS Distributions 	       (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
3601*fdd8201dSApple OSS Distributions }
3602*fdd8201dSApple OSS Distributions 
/*
 * Compute how many additional constrained (non-overcommit) threads may be
 * dispatched at QoS `at_qos`.
 *
 * Admission fails (returns 0) when the global constrained-thread cap
 * (wq_max_constrained_threads) is already consumed, or when the number of
 * active plus recently-busy threads at or above `at_qos` already covers the
 * allowed parallelism for that bucket.  Otherwise returns the remaining
 * headroom, clamped to the constrained-thread budget.
 *
 * uth:             current thread; excluded from the counts when relevant
 *                  (may be NULL)
 * may_start_timer: when admission fails, rearm the delayed thread-creation
 *                  timer so busy threads going idle get re-evaluated
 */
static uint32_t
workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
    struct uthread *uth, bool may_start_timer)
{
	assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
	uint32_t count = 0;

	uint32_t max_count = wq->wq_constrained_threads_scheduled;
	if (uth && workq_thread_is_nonovercommit(uth)) {
		/*
		 * don't count the current thread as scheduled
		 */
		assert(max_count > 0);
		max_count--;
	}
	if (max_count >= wq_max_constrained_threads) {
		WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
		    wq->wq_constrained_threads_scheduled,
		    wq_max_constrained_threads);
		/*
		 * we need 1 or more constrained threads to return to the kernel before
		 * we can dispatch additional work
		 */
		return 0;
	}
	/* max_count now holds the remaining constrained-thread budget. */
	max_count -= wq_max_constrained_threads;

	/*
	 * Compute a metric for many how many threads are active.  We find the
	 * highest priority request outstanding and then add up the number of active
	 * threads in that and all higher-priority buckets.  We'll also add any
	 * "busy" threads which are not currently active but blocked recently enough
	 * that we can't be sure that they won't be unblocked soon and start
	 * being active again.
	 *
	 * We'll then compare this metric to our max concurrency to decide whether
	 * to add a new thread.
	 */

	uint32_t busycount, thactive_count;

	thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
	    at_qos, &busycount, NULL);

	if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
	    at_qos <= uth->uu_workq_pri.qos_bucket) {
		/*
		 * Don't count this thread as currently active, but only if it's not
		 * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
		 * managers.
		 */
		assert(thactive_count > 0);
		thactive_count--;
	}

	count = wq_max_parallelism[_wq_bucket(at_qos)];
	if (count > thactive_count + busycount) {
		/* Headroom available: admit up to the constrained budget. */
		count -= thactive_count + busycount;
		WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
		    thactive_count, busycount);
		return MIN(count, max_count);
	} else {
		WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
		    thactive_count, busycount);
	}

	if (may_start_timer) {
		/*
		 * If this is called from the add timer, we won't have another timer
		 * fire when the thread exits the "busy" state, so rearm the timer.
		 */
		workq_schedule_delayed_thread_creation(wq, 0);
	}

	return 0;
}
3679*fdd8201dSApple OSS Distributions 
3680*fdd8201dSApple OSS Distributions static bool
workq_threadreq_admissible(struct workqueue * wq,struct uthread * uth,workq_threadreq_t req)3681*fdd8201dSApple OSS Distributions workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
3682*fdd8201dSApple OSS Distributions     workq_threadreq_t req)
3683*fdd8201dSApple OSS Distributions {
3684*fdd8201dSApple OSS Distributions 	if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
3685*fdd8201dSApple OSS Distributions 		return workq_may_start_event_mgr_thread(wq, uth);
3686*fdd8201dSApple OSS Distributions 	}
3687*fdd8201dSApple OSS Distributions 	if (workq_threadreq_is_cooperative(req)) {
3688*fdd8201dSApple OSS Distributions 		return workq_cooperative_allowance(wq, req->tr_qos, uth, true);
3689*fdd8201dSApple OSS Distributions 	}
3690*fdd8201dSApple OSS Distributions 	if (workq_threadreq_is_nonovercommit(req)) {
3691*fdd8201dSApple OSS Distributions 		return workq_constrained_allowance(wq, req->tr_qos, uth, true);
3692*fdd8201dSApple OSS Distributions 	}
3693*fdd8201dSApple OSS Distributions 
3694*fdd8201dSApple OSS Distributions 	return true;
3695*fdd8201dSApple OSS Distributions }
3696*fdd8201dSApple OSS Distributions 
/*
 * Called from the context of selecting thread requests for threads returning
 * from userspace or creator thread
 */
/*
 * Return the best (highest-QoS) pending thread request in the cooperative
 * pool, or NULL when none is eligible.
 *
 * Must be called with the workq lock held.  If the current thread is itself
 * a cooperative thread, it is temporarily excluded from the pool's scheduled
 * count (dec / refresh / inc) so the best-request QoS is evaluated as if
 * this thread were free to pick up new work.
 */
static workq_threadreq_t
workq_cooperative_queue_best_req(struct workqueue *wq, struct uthread *uth)
{
	workq_lock_held(wq);

	/*
	 * If the current thread is cooperative, we need to exclude it as part of
	 * cooperative schedule count since this thread is looking for a new
	 * request. Change in the schedule count for cooperative pool therefore
	 * requires us to reeevaluate the next best request for it.
	 */
	if (uth && workq_thread_is_cooperative(uth)) {
		_wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_bucket);

		(void) _wq_cooperative_queue_refresh_best_req_qos(wq);

		_wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_bucket);
	} else {
		/*
		 * The old value that was already precomputed should be safe to use -
		 * add an assert that asserts that the best req QoS doesn't change in
		 * this case
		 */
		assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
	}

	thread_qos_t qos = wq->wq_cooperative_queue_best_req_qos;

	/* There are no eligible requests in the cooperative pool */
	if (qos == THREAD_QOS_UNSPECIFIED) {
		return NULL;
	}
	/* Cooperative requests never carry these special pseudo-QoS values. */
	assert(qos != WORKQ_THREAD_QOS_ABOVEUI);
	assert(qos != WORKQ_THREAD_QOS_MANAGER);

	uint8_t bucket = _wq_bucket(qos);
	assert(!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket]));

	/* Requests are queued FIFO per bucket; take the head. */
	return STAILQ_FIRST(&wq->wq_cooperative_queue[bucket]);
}
3741*fdd8201dSApple OSS Distributions 
/*
 * Select the best thread request for the creator thread to service.
 *
 * Arbitrates, in order, between:
 *   1. the special queue (scheduler-priority based) and the event manager
 *      request (the manager always beats the QoS world),
 *   2. the overcommit vs cooperative QoS queues (ties go to cooperative),
 *   3. the constrained queue (subject to its admission check),
 *   4. finally the best-so-far QoS request vs the best priority request.
 *
 * May return a static fake BG request when only a turnstile push exists,
 * so the creator priority degrades gracefully if the push stops.
 * Returns NULL when no request passes admission.
 *
 * Caller holds the workqueue lock (admission helpers below assert it).
 */
static workq_threadreq_t
workq_threadreq_select_for_creator(struct workqueue *wq)
{
	workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
	thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
	uint8_t pri = 0;

	/*
	 * Compute the best priority request, and ignore the turnstile for now
	 */

	req_pri = priority_queue_max(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_pri) {
		pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
		    &req_pri->tr_entry);
	}

	/*
	 * Handle the manager thread request. The special queue might yield
	 * a higher priority, but the manager always beats the QoS world.
	 */

	req_mgr = wq->wq_event_manager_threadreq;
	if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;

		/*
		 * Manager priority is either an encoded raw sched pri or a
		 * pthread QoS that must be mapped to a workq sched pri.
		 */
		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		} else {
			mgr_pri = thread_workq_pri_for_qos(
				_pthread_priority_thread_qos(mgr_pri));
		}

		return mgr_pri >= pri ? req_mgr : req_pri;
	}

	/*
	 * Compute the best QoS Request, and check whether it beats the "pri" one
	 *
	 * Start by comparing the overcommit and the cooperative pool
	 */
	req_qos = priority_queue_max(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_qos) {
		qos = req_qos->tr_qos;
	}

	req_tmp = workq_cooperative_queue_best_req(wq, NULL);
	if (req_tmp && qos <= req_tmp->tr_qos) {
		/*
		 * Cooperative TR is better between overcommit and cooperative.  Note
		 * that if qos is same between overcommit and cooperative, we choose
		 * cooperative.
		 *
		 * Pick cooperative pool if it passes the admissions check
		 */
		if (workq_cooperative_allowance(wq, req_tmp->tr_qos, NULL, true)) {
			req_qos = req_tmp;
			qos = req_qos->tr_qos;
		}
	}

	/*
	 * Compare the best QoS so far - either from overcommit or from cooperative
	 * pool - and compare it with the constrained pool
	 */
	req_tmp = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);

	if (req_tmp && qos < req_tmp->tr_qos) {
		/*
		 * Constrained pool is best in QoS between overcommit, cooperative
		 * and constrained. Now check how it fairs against the priority case
		 */
		if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
			return req_pri;
		}

		if (workq_constrained_allowance(wq, req_tmp->tr_qos, NULL, true)) {
			/*
			 * If the constrained thread request is the best one and passes
			 * the admission check, pick it.
			 */
			return req_tmp;
		}
	}

	/*
	 * Compare the best of the QoS world with the priority
	 */
	if (pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
		return req_pri;
	}

	if (req_qos) {
		return req_qos;
	}

	/*
	 * If we had no eligible request but we have a turnstile push,
	 * it must be a non overcommit thread request that failed
	 * the admission check.
	 *
	 * Just fake a BG thread request so that if the push stops the creator
	 * priority just drops to 4.
	 */
	if (turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, NULL)) {
		/*
		 * Static, never enqueued: callers only read tr_qos off it to
		 * pick a creator priority.
		 */
		static struct workq_threadreq_s workq_sync_push_fake_req = {
			.tr_qos = THREAD_QOS_BACKGROUND,
		};

		return &workq_sync_push_fake_req;
	}

	return NULL;
}
3859*fdd8201dSApple OSS Distributions 
/*
 * Returns true if this caused a change in the schedule counts of the
 * cooperative pool
 *
 * Moves a thread's accounting between the overcommit / constrained /
 * cooperative pools when the thread request it is about to service belongs
 * to a different pool than the one the thread is currently counted in.
 *
 * @param wq		the workqueue; its lock must be held.
 * @param uth		the thread being re-bucketed; its uu_workq_pri.qos_bucket
 *			is read as the NEW QoS bucket (presumably already updated
 *			by the caller — confirm at call sites).
 * @param old_thread_qos  the QoS bucket the thread was previously counted in
 *			(used for cooperative decrements).
 * @param tr_flags	flags of the thread request being picked, which
 *			determine the destination pool.
 */
static bool
workq_adjust_cooperative_constrained_schedule_counts(struct workqueue *wq,
    struct uthread *uth, thread_qos_t old_thread_qos, workq_tr_flags_t tr_flags)
{
	workq_lock_held(wq);

	/*
	 * Row: thread type
	 * Column: Request type
	 *
	 *					overcommit		non-overcommit		cooperative
	 * overcommit			X				case 1				case 2
	 * cooperative		case 3				case 4				case 5
	 * non-overcommit	case 6					X				case 7
	 *
	 * Move the thread to the right bucket depending on what state it currently
	 * has and what state the thread req it picks, is going to have.
	 *
	 * Note that the creator thread is an overcommit thread.
	 */
	thread_qos_t new_thread_qos = uth->uu_workq_pri.qos_bucket;

	/*
	 * Anytime a cooperative bucket's schedule count changes, we need to
	 * potentially refresh the next best QoS for that pool when we determine
	 * the next request for the creator
	 */
	bool cooperative_pool_sched_count_changed = false;

	if (workq_thread_is_overcommit(uth)) {
		if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 1: thread is overcommit, req is non-overcommit
			wq->wq_constrained_threads_scheduled++;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 2: thread is overcommit, req is cooperative
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
		/* overcommit -> overcommit: nothing to adjust (the X cell) */
	} else if (workq_thread_is_cooperative(uth)) {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 3: thread is cooperative, req is overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
		} else if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 4: thread is cooperative, req is non-overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			wq->wq_constrained_threads_scheduled++;
		} else {
			// Case 5: thread is cooperative, req is also cooperative
			// Possibly at a different QoS: dec old bucket, inc new one.
			assert(workq_tr_is_cooperative(tr_flags));
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
		}
		/* Every cooperative-thread case touched a cooperative count */
		cooperative_pool_sched_count_changed = true;
	} else {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 6: Thread is non-overcommit, req is overcommit
			wq->wq_constrained_threads_scheduled--;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 7: Thread is non-overcommit, req is cooperative
			wq->wq_constrained_threads_scheduled--;
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
		/* non-overcommit -> non-overcommit: nothing to adjust (the X cell) */
	}

	return cooperative_pool_sched_count_changed;
}
3931*fdd8201dSApple OSS Distributions 
/*
 * Select the best thread request for a thread returning from userspace.
 *
 * Same arbitration order as workq_threadreq_select_for_creator(), with two
 * differences: the "priority" candidate also considers the max turnstile
 * proprietor (a kqworkloop's embedded request), and the admission checks are
 * performed on behalf of @uth rather than NULL.
 *
 * @param wq	the workqueue (lock held — admission helpers assert it).
 * @param uth	the selecting thread; treated as NULL if it is the creator,
 *		since the creator is not servicing a request itself.
 *
 * Returns the chosen request, or NULL if nothing passes admission.
 */
static workq_threadreq_t
workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
{
	workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
	uintptr_t proprietor;
	thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
	uint8_t pri = 0;

	/* The creator doesn't count as a cooperative/constrained thread here */
	if (uth == wq->wq_creator) {
		uth = NULL;
	}

	/*
	 * Compute the best priority request (special or turnstile)
	 */

	pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
	    &proprietor);
	if (pri) {
		/* The proprietor of a workq turnstile is a kqworkloop */
		struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
		req_pri = &kqwl->kqwl_request;
		if (req_pri->tr_state != WORKQ_TR_STATE_QUEUED) {
			panic("Invalid thread request (%p) state %d",
			    req_pri, req_pri->tr_state);
		}
	} else {
		req_pri = NULL;
	}

	/* The special queue may beat the turnstile push */
	req_tmp = priority_queue_max(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
	    &req_tmp->tr_entry)) {
		req_pri = req_tmp;
		pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
		    &req_tmp->tr_entry);
	}

	/*
	 * Handle the manager thread request. The special queue might yield
	 * a higher priority, but the manager always beats the QoS world.
	 */

	req_mgr = wq->wq_event_manager_threadreq;
	if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;

		/* Raw sched pri encoding, or a QoS to be mapped to a sched pri */
		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		} else {
			mgr_pri = thread_workq_pri_for_qos(
				_pthread_priority_thread_qos(mgr_pri));
		}

		return mgr_pri >= pri ? req_mgr : req_pri;
	}

	/*
	 * Compute the best QoS Request, and check whether it beats the "pri" one
	 */

	req_qos = priority_queue_max(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_qos) {
		qos = req_qos->tr_qos;
	}

	req_tmp = workq_cooperative_queue_best_req(wq, uth);
	if (req_tmp && qos <= req_tmp->tr_qos) {
		/*
		 * Cooperative TR is better between overcommit and cooperative.  Note
		 * that if qos is same between overcommit and cooperative, we choose
		 * cooperative.
		 *
		 * Pick cooperative pool if it passes the admissions check
		 */
		if (workq_cooperative_allowance(wq, req_tmp->tr_qos, uth, true)) {
			req_qos = req_tmp;
			qos = req_qos->tr_qos;
		}
	}

	/*
	 * Compare the best QoS so far - either from overcommit or from cooperative
	 * pool - and compare it with the constrained pool
	 */
	req_tmp = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);

	if (req_tmp && qos < req_tmp->tr_qos) {
		/*
		 * Constrained pool is best in QoS between overcommit, cooperative
		 * and constrained. Now check how it fairs against the priority case
		 */
		if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
			return req_pri;
		}

		if (workq_constrained_allowance(wq, req_tmp->tr_qos, uth, true)) {
			/*
			 * If the constrained thread request is the best one and passes
			 * the admission check, pick it.
			 */
			return req_tmp;
		}
	}

	/* Priority world beats (or ties) the QoS world: take the pri request */
	if (req_pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
		return req_pri;
	}

	/* May be NULL if no QoS request existed either */
	return req_qos;
}
4045*fdd8201dSApple OSS Distributions 
/*
 * The creator is an anonymous thread that is counted as scheduled,
 * but otherwise without its scheduler callback set or tracked as active
 * that is used to make other threads.
 *
 * When more requests are added or an existing one is hurried along,
 * a creator is elected and setup, or the existing one overridden accordingly.
 *
 * While this creator is in flight, because no request has been dequeued,
 * already running threads have a chance at stealing thread requests avoiding
 * useless context switches, and the creator once scheduled may not find any
 * work to do and will then just park again.
 *
 * The creator serves the dual purpose of informing the scheduler of work that
 * hasn't be materialized as threads yet, and also as a natural pacing mechanism
 * for thread creation.
 *
 * By being anonymous (and not bound to anything) it means that thread requests
 * can be stolen from this creator by threads already on core yielding more
 * efficient scheduling and reduced context switches.
 *
 * @param p	the owning process; may be NULL only when
 *		WORKQ_THREADREQ_CAN_CREATE_THREADS is not set (asserted below).
 * @param wq	the workqueue; lock held on entry (and on return, though
 *		workq_schedule_immediate_thread_creation may drop/retake it).
 * @param flags	controls the fallback behavior when no idle thread exists.
 */
static void
workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags)
{
	workq_threadreq_t req;
	struct uthread *uth;
	bool needs_wakeup;

	workq_lock_held(wq);
	assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);

again:
	uth = wq->wq_creator;

	if (!wq->wq_reqcount) {
		/*
		 * There is no thread request left.
		 *
		 * If there is a creator, leave everything in place, so that it cleans
		 * up itself in workq_push_idle_thread().
		 *
		 * Else, make sure the turnstile state is reset to no inheritor.
		 */
		if (uth == NULL) {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
		return;
	}

	req = workq_threadreq_select_for_creator(wq);
	if (req == NULL) {
		/*
		 * There isn't a thread request that passes the admission check.
		 *
		 * If there is a creator, do not touch anything, the creator will sort
		 * it out when it runs.
		 *
		 * Else, set the inheritor to "WORKQ" so that the turnstile propagation
		 * code calls us if anything changes.
		 */
		if (uth == NULL) {
			workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
		}
		return;
	}


	if (uth) {
		/*
		 * We need to maybe override the creator we already have
		 */
		if (workq_thread_needs_priority_change(req, uth)) {
			WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
			    wq, 1, uthread_tid(uth), req->tr_qos);
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		}
		/* An existing creator must already be the turnstile inheritor */
		assert(wq->wq_inheritor == get_machthread(uth));
	} else if (wq->wq_thidlecount) {
		/*
		 * We need to unpark a creator thread
		 */
		wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
		    &needs_wakeup);
		/* Always reset the priorities on the newly chosen creator */
		workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		workq_turnstile_update_inheritor(wq, get_machthread(uth),
		    TURNSTILE_INHERITOR_THREAD);
		WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
		    wq, 2, uthread_tid(uth), req->tr_qos);
		/*
		 * Snapshot fulfillment state so the creator can tell, on wakeup,
		 * whether requests were fulfilled while it was in flight.
		 */
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields = 0;
		if (needs_wakeup) {
			workq_thread_wakeup(uth);
		}
	} else {
		/*
		 * We need to allocate a thread...
		 */
		if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
			/* out of threads, just go away */
			flags = WORKQ_THREADREQ_NONE;
		} else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
			/* Defer to the current thread via an AST redrive */
			act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
		} else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
			/* This can drop the workqueue lock, and take it again */
			workq_schedule_immediate_thread_creation(wq);
		} else if (workq_add_new_idle_thread(p, wq)) {
			/* A new idle thread exists now; re-run the election */
			goto again;
		} else {
			workq_schedule_delayed_thread_creation(wq, 0);
		}

		/*
		 * If the current thread is the inheritor:
		 *
		 * If we set the AST, then the thread will stay the inheritor until
		 * either the AST calls workq_kern_threadreq_redrive(), or it parks
		 * and calls workq_push_idle_thread().
		 *
		 * Else, the responsibility of the thread creation is with a thread-call
		 * and we need to clear the inheritor.
		 */
		if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
		    wq->wq_inheritor == current_thread()) {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
	}
}
4175*fdd8201dSApple OSS Distributions 
4176*fdd8201dSApple OSS Distributions /**
4177*fdd8201dSApple OSS Distributions  * Same as workq_unpark_select_threadreq_or_park_and_unlock,
4178*fdd8201dSApple OSS Distributions  * but do not allow early binds.
4179*fdd8201dSApple OSS Distributions  *
4180*fdd8201dSApple OSS Distributions  * Called with the base pri frozen, will unfreeze it.
4181*fdd8201dSApple OSS Distributions  */
4182*fdd8201dSApple OSS Distributions __attribute__((noreturn, noinline))
4183*fdd8201dSApple OSS Distributions static void
workq_select_threadreq_or_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)4184*fdd8201dSApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
4185*fdd8201dSApple OSS Distributions     struct uthread *uth, uint32_t setup_flags)
4186*fdd8201dSApple OSS Distributions {
4187*fdd8201dSApple OSS Distributions 	workq_threadreq_t req = NULL;
4188*fdd8201dSApple OSS Distributions 	bool is_creator = (wq->wq_creator == uth);
4189*fdd8201dSApple OSS Distributions 	bool schedule_creator = false;
4190*fdd8201dSApple OSS Distributions 
4191*fdd8201dSApple OSS Distributions 	if (__improbable(_wq_exiting(wq))) {
4192*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0);
4193*fdd8201dSApple OSS Distributions 		goto park;
4194*fdd8201dSApple OSS Distributions 	}
4195*fdd8201dSApple OSS Distributions 
4196*fdd8201dSApple OSS Distributions 	if (wq->wq_reqcount == 0) {
4197*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0);
4198*fdd8201dSApple OSS Distributions 		goto park;
4199*fdd8201dSApple OSS Distributions 	}
4200*fdd8201dSApple OSS Distributions 
4201*fdd8201dSApple OSS Distributions 	req = workq_threadreq_select(wq, uth);
4202*fdd8201dSApple OSS Distributions 	if (__improbable(req == NULL)) {
4203*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0);
4204*fdd8201dSApple OSS Distributions 		goto park;
4205*fdd8201dSApple OSS Distributions 	}
4206*fdd8201dSApple OSS Distributions 
4207*fdd8201dSApple OSS Distributions 	thread_qos_t old_thread_bucket = uth->uu_workq_pri.qos_bucket;
4208*fdd8201dSApple OSS Distributions 	uint8_t tr_flags = req->tr_flags;
4209*fdd8201dSApple OSS Distributions 	struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);
4210*fdd8201dSApple OSS Distributions 
4211*fdd8201dSApple OSS Distributions 	/*
4212*fdd8201dSApple OSS Distributions 	 * Attempt to setup ourselves as the new thing to run, moving all priority
4213*fdd8201dSApple OSS Distributions 	 * pushes to ourselves.
4214*fdd8201dSApple OSS Distributions 	 *
4215*fdd8201dSApple OSS Distributions 	 * If the current thread is the creator, then the fact that we are presently
4216*fdd8201dSApple OSS Distributions 	 * running is proof that we'll do something useful, so keep going.
4217*fdd8201dSApple OSS Distributions 	 *
4218*fdd8201dSApple OSS Distributions 	 * For other cases, peek at the AST to know whether the scheduler wants
4219*fdd8201dSApple OSS Distributions 	 * to preempt us, if yes, park instead, and move the thread request
4220*fdd8201dSApple OSS Distributions 	 * turnstile back to the workqueue.
4221*fdd8201dSApple OSS Distributions 	 */
4222*fdd8201dSApple OSS Distributions 	if (req_ts) {
4223*fdd8201dSApple OSS Distributions 		workq_perform_turnstile_operation_locked(wq, ^{
4224*fdd8201dSApple OSS Distributions 			turnstile_update_inheritor(req_ts, get_machthread(uth),
4225*fdd8201dSApple OSS Distributions 			TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
4226*fdd8201dSApple OSS Distributions 			turnstile_update_inheritor_complete(req_ts,
4227*fdd8201dSApple OSS Distributions 			TURNSTILE_INTERLOCK_HELD);
4228*fdd8201dSApple OSS Distributions 		});
4229*fdd8201dSApple OSS Distributions 	}
4230*fdd8201dSApple OSS Distributions 
4231*fdd8201dSApple OSS Distributions 	/* accounting changes of aggregate thscheduled_count and thactive which has
4232*fdd8201dSApple OSS Distributions 	 * to be paired with the workq_thread_reset_pri below so that we have
4233*fdd8201dSApple OSS Distributions 	 * uth->uu_workq_pri match with thactive.
4234*fdd8201dSApple OSS Distributions 	 *
4235*fdd8201dSApple OSS Distributions 	 * This is undone when the thread parks */
4236*fdd8201dSApple OSS Distributions 	if (is_creator) {
4237*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
4238*fdd8201dSApple OSS Distributions 		    uth->uu_save.uus_workq_park_data.yields);
4239*fdd8201dSApple OSS Distributions 		wq->wq_creator = NULL;
4240*fdd8201dSApple OSS Distributions 		_wq_thactive_inc(wq, req->tr_qos);
4241*fdd8201dSApple OSS Distributions 		wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
4242*fdd8201dSApple OSS Distributions 	} else if (old_thread_bucket != req->tr_qos) {
4243*fdd8201dSApple OSS Distributions 		_wq_thactive_move(wq, old_thread_bucket, req->tr_qos);
4244*fdd8201dSApple OSS Distributions 	}
4245*fdd8201dSApple OSS Distributions 	workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4246*fdd8201dSApple OSS Distributions 
4247*fdd8201dSApple OSS Distributions 	/*
4248*fdd8201dSApple OSS Distributions 	 * Make relevant accounting changes for pool specific counts.
4249*fdd8201dSApple OSS Distributions 	 *
4250*fdd8201dSApple OSS Distributions 	 * The schedule counts changing can affect what the next best request
4251*fdd8201dSApple OSS Distributions 	 * for cooperative thread pool is if this request is dequeued.
4252*fdd8201dSApple OSS Distributions 	 */
4253*fdd8201dSApple OSS Distributions 	bool cooperative_sched_count_changed =
4254*fdd8201dSApple OSS Distributions 	    workq_adjust_cooperative_constrained_schedule_counts(wq, uth,
4255*fdd8201dSApple OSS Distributions 	    old_thread_bucket, tr_flags);
4256*fdd8201dSApple OSS Distributions 
4257*fdd8201dSApple OSS Distributions 	if (workq_tr_is_overcommit(tr_flags)) {
4258*fdd8201dSApple OSS Distributions 		workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
4259*fdd8201dSApple OSS Distributions 	} else if (workq_tr_is_cooperative(tr_flags)) {
4260*fdd8201dSApple OSS Distributions 		workq_thread_set_type(uth, UT_WORKQ_COOPERATIVE);
4261*fdd8201dSApple OSS Distributions 	} else {
4262*fdd8201dSApple OSS Distributions 		workq_thread_set_type(uth, 0);
4263*fdd8201dSApple OSS Distributions 	}
4264*fdd8201dSApple OSS Distributions 
4265*fdd8201dSApple OSS Distributions 	if (__improbable(thread_unfreeze_base_pri(get_machthread(uth)) && !is_creator)) {
4266*fdd8201dSApple OSS Distributions 		if (req_ts) {
4267*fdd8201dSApple OSS Distributions 			workq_perform_turnstile_operation_locked(wq, ^{
4268*fdd8201dSApple OSS Distributions 				turnstile_update_inheritor(req_ts, wq->wq_turnstile,
4269*fdd8201dSApple OSS Distributions 				TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
4270*fdd8201dSApple OSS Distributions 				turnstile_update_inheritor_complete(req_ts,
4271*fdd8201dSApple OSS Distributions 				TURNSTILE_INTERLOCK_HELD);
4272*fdd8201dSApple OSS Distributions 			});
4273*fdd8201dSApple OSS Distributions 		}
4274*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0);
4275*fdd8201dSApple OSS Distributions 		goto park_thawed;
4276*fdd8201dSApple OSS Distributions 	}
4277*fdd8201dSApple OSS Distributions 
4278*fdd8201dSApple OSS Distributions 	/*
4279*fdd8201dSApple OSS Distributions 	 * We passed all checks, dequeue the request, bind to it, and set it up
4280*fdd8201dSApple OSS Distributions 	 * to return to user.
4281*fdd8201dSApple OSS Distributions 	 */
4282*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
4283*fdd8201dSApple OSS Distributions 	    workq_trace_req_id(req), tr_flags, 0);
4284*fdd8201dSApple OSS Distributions 	wq->wq_fulfilled++;
4285*fdd8201dSApple OSS Distributions 	schedule_creator = workq_threadreq_dequeue(wq, req,
4286*fdd8201dSApple OSS Distributions 	    cooperative_sched_count_changed);
4287*fdd8201dSApple OSS Distributions 
4288*fdd8201dSApple OSS Distributions 	workq_thread_reset_cpupercent(req, uth);
4289*fdd8201dSApple OSS Distributions 
4290*fdd8201dSApple OSS Distributions 	if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4291*fdd8201dSApple OSS Distributions 		kqueue_threadreq_bind_prepost(p, req, uth);
4292*fdd8201dSApple OSS Distributions 		req = NULL;
4293*fdd8201dSApple OSS Distributions 	} else if (req->tr_count > 0) {
4294*fdd8201dSApple OSS Distributions 		req = NULL;
4295*fdd8201dSApple OSS Distributions 	}
4296*fdd8201dSApple OSS Distributions 
4297*fdd8201dSApple OSS Distributions 	if (uth->uu_workq_flags & UT_WORKQ_NEW) {
4298*fdd8201dSApple OSS Distributions 		uth->uu_workq_flags ^= UT_WORKQ_NEW;
4299*fdd8201dSApple OSS Distributions 		setup_flags |= WQ_SETUP_FIRST_USE;
4300*fdd8201dSApple OSS Distributions 	}
4301*fdd8201dSApple OSS Distributions 
4302*fdd8201dSApple OSS Distributions 	/* If one of the following is true, call workq_schedule_creator (which also
4303*fdd8201dSApple OSS Distributions 	 * adjusts priority of existing creator):
4304*fdd8201dSApple OSS Distributions 	 *
4305*fdd8201dSApple OSS Distributions 	 *	  - We are the creator currently so the wq may need a new creator
4306*fdd8201dSApple OSS Distributions 	 *	  - The request we're binding to is the highest priority one, existing
4307*fdd8201dSApple OSS Distributions 	 *	  creator's priority might need to be adjusted to reflect the next
4308*fdd8201dSApple OSS Distributions 	 *	  highest TR
4309*fdd8201dSApple OSS Distributions 	 */
4310*fdd8201dSApple OSS Distributions 	if (is_creator || schedule_creator) {
4311*fdd8201dSApple OSS Distributions 		/* This can drop the workqueue lock, and take it again */
4312*fdd8201dSApple OSS Distributions 		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
4313*fdd8201dSApple OSS Distributions 	}
4314*fdd8201dSApple OSS Distributions 
4315*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
4316*fdd8201dSApple OSS Distributions 
4317*fdd8201dSApple OSS Distributions 	if (req) {
4318*fdd8201dSApple OSS Distributions 		zfree(workq_zone_threadreq, req);
4319*fdd8201dSApple OSS Distributions 	}
4320*fdd8201dSApple OSS Distributions 
4321*fdd8201dSApple OSS Distributions 	/*
4322*fdd8201dSApple OSS Distributions 	 * Run Thread, Run!
4323*fdd8201dSApple OSS Distributions 	 */
4324*fdd8201dSApple OSS Distributions 	uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
4325*fdd8201dSApple OSS Distributions 	if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
4326*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
4327*fdd8201dSApple OSS Distributions 	} else if (workq_tr_is_overcommit(tr_flags)) {
4328*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
4329*fdd8201dSApple OSS Distributions 	} else if (workq_tr_is_cooperative(tr_flags)) {
4330*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
4331*fdd8201dSApple OSS Distributions 	}
4332*fdd8201dSApple OSS Distributions 	if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
4333*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_KEVENT;
4334*fdd8201dSApple OSS Distributions 		assert((upcall_flags & WQ_FLAG_THREAD_COOPERATIVE) == 0);
4335*fdd8201dSApple OSS Distributions 	}
4336*fdd8201dSApple OSS Distributions 
4337*fdd8201dSApple OSS Distributions 	if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
4338*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
4339*fdd8201dSApple OSS Distributions 	}
4340*fdd8201dSApple OSS Distributions 	uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
4341*fdd8201dSApple OSS Distributions 
4342*fdd8201dSApple OSS Distributions 	if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4343*fdd8201dSApple OSS Distributions 		kqueue_threadreq_bind_commit(p, get_machthread(uth));
4344*fdd8201dSApple OSS Distributions 	} else {
4345*fdd8201dSApple OSS Distributions #if CONFIG_PREADOPT_TG
4346*fdd8201dSApple OSS Distributions 		/*
4347*fdd8201dSApple OSS Distributions 		 * The thread may have a preadopt thread group on it already because it
4348*fdd8201dSApple OSS Distributions 		 * got tagged with it as a creator thread. So we need to make sure to
4349*fdd8201dSApple OSS Distributions 		 * clear that since we don't have preadoption for anonymous thread
4350*fdd8201dSApple OSS Distributions 		 * requests
4351*fdd8201dSApple OSS Distributions 		 */
4352*fdd8201dSApple OSS Distributions 		thread_set_preadopt_thread_group(get_machthread(uth), NULL);
4353*fdd8201dSApple OSS Distributions #endif
4354*fdd8201dSApple OSS Distributions 	}
4355*fdd8201dSApple OSS Distributions 
4356*fdd8201dSApple OSS Distributions 	workq_setup_and_run(p, uth, setup_flags);
4357*fdd8201dSApple OSS Distributions 	__builtin_unreachable();
4358*fdd8201dSApple OSS Distributions 
4359*fdd8201dSApple OSS Distributions park:
4360*fdd8201dSApple OSS Distributions 	thread_unfreeze_base_pri(get_machthread(uth));
4361*fdd8201dSApple OSS Distributions park_thawed:
4362*fdd8201dSApple OSS Distributions 	workq_park_and_unlock(p, wq, uth, setup_flags);
4363*fdd8201dSApple OSS Distributions }
4364*fdd8201dSApple OSS Distributions 
4365*fdd8201dSApple OSS Distributions /**
4366*fdd8201dSApple OSS Distributions  * Runs a thread request on a thread
4367*fdd8201dSApple OSS Distributions  *
4368*fdd8201dSApple OSS Distributions  * - if thread is THREAD_NULL, will find a thread and run the request there.
4369*fdd8201dSApple OSS Distributions  *   Otherwise, the thread must be the current thread.
4370*fdd8201dSApple OSS Distributions  *
4371*fdd8201dSApple OSS Distributions  * - if req is NULL, will find the highest priority request and run that.  If
4372*fdd8201dSApple OSS Distributions  *   it is not NULL, it must be a threadreq object in state NEW.  If it can not
4373*fdd8201dSApple OSS Distributions  *   be run immediately, it will be enqueued and moved to state QUEUED.
4374*fdd8201dSApple OSS Distributions  *
4375*fdd8201dSApple OSS Distributions  *   Either way, the thread request object serviced will be moved to state
4376*fdd8201dSApple OSS Distributions  *   BINDING and attached to the uthread.
4377*fdd8201dSApple OSS Distributions  *
4378*fdd8201dSApple OSS Distributions  * Should be called with the workqueue lock held.  Will drop it.
4379*fdd8201dSApple OSS Distributions  * Should be called with the base pri not frozen.
4380*fdd8201dSApple OSS Distributions  */
4381*fdd8201dSApple OSS Distributions __attribute__((noreturn, noinline))
4382*fdd8201dSApple OSS Distributions static void
workq_unpark_select_threadreq_or_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)4383*fdd8201dSApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
4384*fdd8201dSApple OSS Distributions     struct uthread *uth, uint32_t setup_flags)
4385*fdd8201dSApple OSS Distributions {
4386*fdd8201dSApple OSS Distributions 	if (uth->uu_workq_flags & UT_WORKQ_EARLY_BOUND) {
4387*fdd8201dSApple OSS Distributions 		if (uth->uu_workq_flags & UT_WORKQ_NEW) {
4388*fdd8201dSApple OSS Distributions 			setup_flags |= WQ_SETUP_FIRST_USE;
4389*fdd8201dSApple OSS Distributions 		}
4390*fdd8201dSApple OSS Distributions 		uth->uu_workq_flags &= ~(UT_WORKQ_NEW | UT_WORKQ_EARLY_BOUND);
4391*fdd8201dSApple OSS Distributions 		/*
4392*fdd8201dSApple OSS Distributions 		 * This pointer is possibly freed and only used for tracing purposes.
4393*fdd8201dSApple OSS Distributions 		 */
4394*fdd8201dSApple OSS Distributions 		workq_threadreq_t req = uth->uu_save.uus_workq_park_data.thread_request;
4395*fdd8201dSApple OSS Distributions 		workq_unlock(wq);
4396*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
4397*fdd8201dSApple OSS Distributions 		    VM_KERNEL_ADDRHIDE(req), 0, 0);
4398*fdd8201dSApple OSS Distributions 		(void)req;
4399*fdd8201dSApple OSS Distributions 
4400*fdd8201dSApple OSS Distributions 		workq_setup_and_run(p, uth, setup_flags);
4401*fdd8201dSApple OSS Distributions 		__builtin_unreachable();
4402*fdd8201dSApple OSS Distributions 	}
4403*fdd8201dSApple OSS Distributions 
4404*fdd8201dSApple OSS Distributions 	thread_freeze_base_pri(get_machthread(uth));
4405*fdd8201dSApple OSS Distributions 	workq_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
4406*fdd8201dSApple OSS Distributions }
4407*fdd8201dSApple OSS Distributions 
4408*fdd8201dSApple OSS Distributions static bool
workq_creator_should_yield(struct workqueue * wq,struct uthread * uth)4409*fdd8201dSApple OSS Distributions workq_creator_should_yield(struct workqueue *wq, struct uthread *uth)
4410*fdd8201dSApple OSS Distributions {
4411*fdd8201dSApple OSS Distributions 	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
4412*fdd8201dSApple OSS Distributions 
4413*fdd8201dSApple OSS Distributions 	if (qos >= THREAD_QOS_USER_INTERACTIVE) {
4414*fdd8201dSApple OSS Distributions 		return false;
4415*fdd8201dSApple OSS Distributions 	}
4416*fdd8201dSApple OSS Distributions 
4417*fdd8201dSApple OSS Distributions 	uint32_t snapshot = uth->uu_save.uus_workq_park_data.fulfilled_snapshot;
4418*fdd8201dSApple OSS Distributions 	if (wq->wq_fulfilled == snapshot) {
4419*fdd8201dSApple OSS Distributions 		return false;
4420*fdd8201dSApple OSS Distributions 	}
4421*fdd8201dSApple OSS Distributions 
4422*fdd8201dSApple OSS Distributions 	uint32_t cnt = 0, conc = wq_max_parallelism[_wq_bucket(qos)];
4423*fdd8201dSApple OSS Distributions 	if (wq->wq_fulfilled - snapshot > conc) {
4424*fdd8201dSApple OSS Distributions 		/* we fulfilled more than NCPU requests since being dispatched */
4425*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1,
4426*fdd8201dSApple OSS Distributions 		    wq->wq_fulfilled, snapshot);
4427*fdd8201dSApple OSS Distributions 		return true;
4428*fdd8201dSApple OSS Distributions 	}
4429*fdd8201dSApple OSS Distributions 
4430*fdd8201dSApple OSS Distributions 	for (uint8_t i = _wq_bucket(qos); i < WORKQ_NUM_QOS_BUCKETS; i++) {
4431*fdd8201dSApple OSS Distributions 		cnt += wq->wq_thscheduled_count[i];
4432*fdd8201dSApple OSS Distributions 	}
4433*fdd8201dSApple OSS Distributions 	if (conc <= cnt) {
4434*fdd8201dSApple OSS Distributions 		/* We fulfilled requests and have more than NCPU scheduled threads */
4435*fdd8201dSApple OSS Distributions 		WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2,
4436*fdd8201dSApple OSS Distributions 		    wq->wq_fulfilled, snapshot);
4437*fdd8201dSApple OSS Distributions 		return true;
4438*fdd8201dSApple OSS Distributions 	}
4439*fdd8201dSApple OSS Distributions 
4440*fdd8201dSApple OSS Distributions 	return false;
4441*fdd8201dSApple OSS Distributions }
4442*fdd8201dSApple OSS Distributions 
4443*fdd8201dSApple OSS Distributions /**
4444*fdd8201dSApple OSS Distributions  * parked thread wakes up
4445*fdd8201dSApple OSS Distributions  */
4446*fdd8201dSApple OSS Distributions __attribute__((noreturn, noinline))
4447*fdd8201dSApple OSS Distributions static void
workq_unpark_continue(void * parameter __unused,wait_result_t wr __unused)4448*fdd8201dSApple OSS Distributions workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused)
4449*fdd8201dSApple OSS Distributions {
4450*fdd8201dSApple OSS Distributions 	thread_t th = current_thread();
4451*fdd8201dSApple OSS Distributions 	struct uthread *uth = get_bsdthread_info(th);
4452*fdd8201dSApple OSS Distributions 	proc_t p = current_proc();
4453*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr_fast(p);
4454*fdd8201dSApple OSS Distributions 
4455*fdd8201dSApple OSS Distributions 	workq_lock_spin(wq);
4456*fdd8201dSApple OSS Distributions 
4457*fdd8201dSApple OSS Distributions 	if (wq->wq_creator == uth && workq_creator_should_yield(wq, uth)) {
4458*fdd8201dSApple OSS Distributions 		/*
4459*fdd8201dSApple OSS Distributions 		 * If the number of threads we have out are able to keep up with the
4460*fdd8201dSApple OSS Distributions 		 * demand, then we should avoid sending this creator thread to
4461*fdd8201dSApple OSS Distributions 		 * userspace.
4462*fdd8201dSApple OSS Distributions 		 */
4463*fdd8201dSApple OSS Distributions 		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
4464*fdd8201dSApple OSS Distributions 		uth->uu_save.uus_workq_park_data.yields++;
4465*fdd8201dSApple OSS Distributions 		workq_unlock(wq);
4466*fdd8201dSApple OSS Distributions 		thread_yield_with_continuation(workq_unpark_continue, NULL);
4467*fdd8201dSApple OSS Distributions 		__builtin_unreachable();
4468*fdd8201dSApple OSS Distributions 	}
4469*fdd8201dSApple OSS Distributions 
4470*fdd8201dSApple OSS Distributions 	if (__probable(uth->uu_workq_flags & UT_WORKQ_RUNNING)) {
4471*fdd8201dSApple OSS Distributions 		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, WQ_SETUP_NONE);
4472*fdd8201dSApple OSS Distributions 		__builtin_unreachable();
4473*fdd8201dSApple OSS Distributions 	}
4474*fdd8201dSApple OSS Distributions 
4475*fdd8201dSApple OSS Distributions 	if (__probable(wr == THREAD_AWAKENED)) {
4476*fdd8201dSApple OSS Distributions 		/*
4477*fdd8201dSApple OSS Distributions 		 * We were set running, but for the purposes of dying.
4478*fdd8201dSApple OSS Distributions 		 */
4479*fdd8201dSApple OSS Distributions 		assert(uth->uu_workq_flags & UT_WORKQ_DYING);
4480*fdd8201dSApple OSS Distributions 		assert((uth->uu_workq_flags & UT_WORKQ_NEW) == 0);
4481*fdd8201dSApple OSS Distributions 	} else {
4482*fdd8201dSApple OSS Distributions 		/*
4483*fdd8201dSApple OSS Distributions 		 * workaround for <rdar://problem/38647347>,
4484*fdd8201dSApple OSS Distributions 		 * in case we do hit userspace, make sure calling
4485*fdd8201dSApple OSS Distributions 		 * workq_thread_terminate() does the right thing here,
4486*fdd8201dSApple OSS Distributions 		 * and if we never call it, that workq_exit() will too because it sees
4487*fdd8201dSApple OSS Distributions 		 * this thread on the runlist.
4488*fdd8201dSApple OSS Distributions 		 */
4489*fdd8201dSApple OSS Distributions 		assert(wr == THREAD_INTERRUPTED);
4490*fdd8201dSApple OSS Distributions 		wq->wq_thdying_count++;
4491*fdd8201dSApple OSS Distributions 		uth->uu_workq_flags |= UT_WORKQ_DYING;
4492*fdd8201dSApple OSS Distributions 	}
4493*fdd8201dSApple OSS Distributions 
4494*fdd8201dSApple OSS Distributions 	workq_unpark_for_death_and_unlock(p, wq, uth,
4495*fdd8201dSApple OSS Distributions 	    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, WQ_SETUP_NONE);
4496*fdd8201dSApple OSS Distributions 	__builtin_unreachable();
4497*fdd8201dSApple OSS Distributions }
4498*fdd8201dSApple OSS Distributions 
4499*fdd8201dSApple OSS Distributions __attribute__((noreturn, noinline))
4500*fdd8201dSApple OSS Distributions static void
workq_setup_and_run(proc_t p,struct uthread * uth,int setup_flags)4501*fdd8201dSApple OSS Distributions workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags)
4502*fdd8201dSApple OSS Distributions {
4503*fdd8201dSApple OSS Distributions 	thread_t th = get_machthread(uth);
4504*fdd8201dSApple OSS Distributions 	vm_map_t vmap = get_task_map(p->task);
4505*fdd8201dSApple OSS Distributions 
4506*fdd8201dSApple OSS Distributions 	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
4507*fdd8201dSApple OSS Distributions 		/*
4508*fdd8201dSApple OSS Distributions 		 * For preemption reasons, we want to reset the voucher as late as
4509*fdd8201dSApple OSS Distributions 		 * possible, so we do it in two places:
4510*fdd8201dSApple OSS Distributions 		 *   - Just before parking (i.e. in workq_park_and_unlock())
4511*fdd8201dSApple OSS Distributions 		 *   - Prior to doing the setup for the next workitem (i.e. here)
4512*fdd8201dSApple OSS Distributions 		 *
4513*fdd8201dSApple OSS Distributions 		 * Those two places are sufficient to ensure we always reset it before
4514*fdd8201dSApple OSS Distributions 		 * it goes back out to user space, but be careful to not break that
4515*fdd8201dSApple OSS Distributions 		 * guarantee.
4516*fdd8201dSApple OSS Distributions 		 *
4517*fdd8201dSApple OSS Distributions 		 * Note that setting the voucher to NULL will not clear the preadoption
4518*fdd8201dSApple OSS Distributions 		 * thread group on this thread
4519*fdd8201dSApple OSS Distributions 		 */
4520*fdd8201dSApple OSS Distributions 		__assert_only kern_return_t kr;
4521*fdd8201dSApple OSS Distributions 		kr = thread_set_voucher_name(MACH_PORT_NULL);
4522*fdd8201dSApple OSS Distributions 		assert(kr == KERN_SUCCESS);
4523*fdd8201dSApple OSS Distributions 	}
4524*fdd8201dSApple OSS Distributions 
4525*fdd8201dSApple OSS Distributions 	uint32_t upcall_flags = uth->uu_save.uus_workq_park_data.upcall_flags;
4526*fdd8201dSApple OSS Distributions 	if (!(setup_flags & WQ_SETUP_FIRST_USE)) {
4527*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_REUSE;
4528*fdd8201dSApple OSS Distributions 	}
4529*fdd8201dSApple OSS Distributions 
4530*fdd8201dSApple OSS Distributions 	if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
4531*fdd8201dSApple OSS Distributions 		/*
4532*fdd8201dSApple OSS Distributions 		 * For threads that have an outside-of-QoS thread priority, indicate
4533*fdd8201dSApple OSS Distributions 		 * to userspace that setting QoS should only affect the TSD and not
4534*fdd8201dSApple OSS Distributions 		 * change QOS in the kernel.
4535*fdd8201dSApple OSS Distributions 		 */
4536*fdd8201dSApple OSS Distributions 		upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
4537*fdd8201dSApple OSS Distributions 	} else {
4538*fdd8201dSApple OSS Distributions 		/*
4539*fdd8201dSApple OSS Distributions 		 * Put the QoS class value into the lower bits of the reuse_thread
4540*fdd8201dSApple OSS Distributions 		 * register, this is where the thread priority used to be stored
4541*fdd8201dSApple OSS Distributions 		 * anyway.
4542*fdd8201dSApple OSS Distributions 		 */
4543*fdd8201dSApple OSS Distributions 		upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
4544*fdd8201dSApple OSS Distributions 		    WQ_FLAG_THREAD_PRIO_QOS;
4545*fdd8201dSApple OSS Distributions 	}
4546*fdd8201dSApple OSS Distributions 
4547*fdd8201dSApple OSS Distributions 	if (uth->uu_workq_thport == MACH_PORT_NULL) {
4548*fdd8201dSApple OSS Distributions 		/* convert_thread_to_port_pinned() consumes a reference */
4549*fdd8201dSApple OSS Distributions 		thread_reference(th);
4550*fdd8201dSApple OSS Distributions 		/* Convert to immovable/pinned thread port, but port is not pinned yet */
4551*fdd8201dSApple OSS Distributions 		ipc_port_t port = convert_thread_to_port_pinned(th);
4552*fdd8201dSApple OSS Distributions 		/* Atomically, pin and copy out the port */
4553*fdd8201dSApple OSS Distributions 		uth->uu_workq_thport = ipc_port_copyout_send_pinned(port, get_task_ipcspace(p->task));
4554*fdd8201dSApple OSS Distributions 	}
4555*fdd8201dSApple OSS Distributions 
4556*fdd8201dSApple OSS Distributions 	/* Thread has been set up to run, arm its next workqueue quantum or disarm
4557*fdd8201dSApple OSS Distributions 	 * if it is no longer supporting that */
4558*fdd8201dSApple OSS Distributions 	if (thread_supports_cooperative_workqueue(th)) {
4559*fdd8201dSApple OSS Distributions 		thread_arm_workqueue_quantum(th);
4560*fdd8201dSApple OSS Distributions 	} else {
4561*fdd8201dSApple OSS Distributions 		thread_disarm_workqueue_quantum(th);
4562*fdd8201dSApple OSS Distributions 	}
4563*fdd8201dSApple OSS Distributions 
4564*fdd8201dSApple OSS Distributions 	/*
4565*fdd8201dSApple OSS Distributions 	 * Call out to pthread, this sets up the thread, pulls in kevent structs
4566*fdd8201dSApple OSS Distributions 	 * onto the stack, sets up the thread state and then returns to userspace.
4567*fdd8201dSApple OSS Distributions 	 */
4568*fdd8201dSApple OSS Distributions 	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START,
4569*fdd8201dSApple OSS Distributions 	    proc_get_wqptr_fast(p), 0, 0, 0);
4570*fdd8201dSApple OSS Distributions 
4571*fdd8201dSApple OSS Distributions 	if (workq_thread_is_cooperative(uth)) {
4572*fdd8201dSApple OSS Distributions 		thread_sched_call(th, NULL);
4573*fdd8201dSApple OSS Distributions 	} else {
4574*fdd8201dSApple OSS Distributions 		thread_sched_call(th, workq_sched_callback);
4575*fdd8201dSApple OSS Distributions 	}
4576*fdd8201dSApple OSS Distributions 
4577*fdd8201dSApple OSS Distributions 	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
4578*fdd8201dSApple OSS Distributions 	    uth->uu_workq_thport, 0, setup_flags, upcall_flags);
4579*fdd8201dSApple OSS Distributions 
4580*fdd8201dSApple OSS Distributions 	__builtin_unreachable();
4581*fdd8201dSApple OSS Distributions }
4582*fdd8201dSApple OSS Distributions 
4583*fdd8201dSApple OSS Distributions #pragma mark misc
4584*fdd8201dSApple OSS Distributions 
4585*fdd8201dSApple OSS Distributions int
fill_procworkqueue(proc_t p,struct proc_workqueueinfo * pwqinfo)4586*fdd8201dSApple OSS Distributions fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
4587*fdd8201dSApple OSS Distributions {
4588*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
4589*fdd8201dSApple OSS Distributions 	int error = 0;
4590*fdd8201dSApple OSS Distributions 	int     activecount;
4591*fdd8201dSApple OSS Distributions 
4592*fdd8201dSApple OSS Distributions 	if (wq == NULL) {
4593*fdd8201dSApple OSS Distributions 		return EINVAL;
4594*fdd8201dSApple OSS Distributions 	}
4595*fdd8201dSApple OSS Distributions 
4596*fdd8201dSApple OSS Distributions 	/*
4597*fdd8201dSApple OSS Distributions 	 * This is sometimes called from interrupt context by the kperf sampler.
4598*fdd8201dSApple OSS Distributions 	 * In that case, it's not safe to spin trying to take the lock since we
4599*fdd8201dSApple OSS Distributions 	 * might already hold it.  So, we just try-lock it and error out if it's
4600*fdd8201dSApple OSS Distributions 	 * already held.  Since this is just a debugging aid, and all our callers
4601*fdd8201dSApple OSS Distributions 	 * are able to handle an error, that's fine.
4602*fdd8201dSApple OSS Distributions 	 */
4603*fdd8201dSApple OSS Distributions 	bool locked = workq_lock_try(wq);
4604*fdd8201dSApple OSS Distributions 	if (!locked) {
4605*fdd8201dSApple OSS Distributions 		return EBUSY;
4606*fdd8201dSApple OSS Distributions 	}
4607*fdd8201dSApple OSS Distributions 
4608*fdd8201dSApple OSS Distributions 	wq_thactive_t act = _wq_thactive(wq);
4609*fdd8201dSApple OSS Distributions 	activecount = _wq_thactive_aggregate_downto_qos(wq, act,
4610*fdd8201dSApple OSS Distributions 	    WORKQ_THREAD_QOS_MIN, NULL, NULL);
4611*fdd8201dSApple OSS Distributions 	if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) {
4612*fdd8201dSApple OSS Distributions 		activecount++;
4613*fdd8201dSApple OSS Distributions 	}
4614*fdd8201dSApple OSS Distributions 	pwqinfo->pwq_nthreads = wq->wq_nthreads;
4615*fdd8201dSApple OSS Distributions 	pwqinfo->pwq_runthreads = activecount;
4616*fdd8201dSApple OSS Distributions 	pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
4617*fdd8201dSApple OSS Distributions 	pwqinfo->pwq_state = 0;
4618*fdd8201dSApple OSS Distributions 
4619*fdd8201dSApple OSS Distributions 	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
4620*fdd8201dSApple OSS Distributions 		pwqinfo->pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
4621*fdd8201dSApple OSS Distributions 	}
4622*fdd8201dSApple OSS Distributions 
4623*fdd8201dSApple OSS Distributions 	if (wq->wq_nthreads >= wq_max_threads) {
4624*fdd8201dSApple OSS Distributions 		pwqinfo->pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
4625*fdd8201dSApple OSS Distributions 	}
4626*fdd8201dSApple OSS Distributions 
4627*fdd8201dSApple OSS Distributions 	workq_unlock(wq);
4628*fdd8201dSApple OSS Distributions 	return error;
4629*fdd8201dSApple OSS Distributions }
4630*fdd8201dSApple OSS Distributions 
4631*fdd8201dSApple OSS Distributions boolean_t
workqueue_get_pwq_exceeded(void * v,boolean_t * exceeded_total,boolean_t * exceeded_constrained)4632*fdd8201dSApple OSS Distributions workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
4633*fdd8201dSApple OSS Distributions     boolean_t *exceeded_constrained)
4634*fdd8201dSApple OSS Distributions {
4635*fdd8201dSApple OSS Distributions 	proc_t p = v;
4636*fdd8201dSApple OSS Distributions 	struct proc_workqueueinfo pwqinfo;
4637*fdd8201dSApple OSS Distributions 	int err;
4638*fdd8201dSApple OSS Distributions 
4639*fdd8201dSApple OSS Distributions 	assert(p != NULL);
4640*fdd8201dSApple OSS Distributions 	assert(exceeded_total != NULL);
4641*fdd8201dSApple OSS Distributions 	assert(exceeded_constrained != NULL);
4642*fdd8201dSApple OSS Distributions 
4643*fdd8201dSApple OSS Distributions 	err = fill_procworkqueue(p, &pwqinfo);
4644*fdd8201dSApple OSS Distributions 	if (err) {
4645*fdd8201dSApple OSS Distributions 		return FALSE;
4646*fdd8201dSApple OSS Distributions 	}
4647*fdd8201dSApple OSS Distributions 	if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
4648*fdd8201dSApple OSS Distributions 		return FALSE;
4649*fdd8201dSApple OSS Distributions 	}
4650*fdd8201dSApple OSS Distributions 
4651*fdd8201dSApple OSS Distributions 	*exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
4652*fdd8201dSApple OSS Distributions 	*exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);
4653*fdd8201dSApple OSS Distributions 
4654*fdd8201dSApple OSS Distributions 	return TRUE;
4655*fdd8201dSApple OSS Distributions }
4656*fdd8201dSApple OSS Distributions 
4657*fdd8201dSApple OSS Distributions uint32_t
workqueue_get_pwq_state_kdp(void * v)4658*fdd8201dSApple OSS Distributions workqueue_get_pwq_state_kdp(void * v)
4659*fdd8201dSApple OSS Distributions {
4660*fdd8201dSApple OSS Distributions 	static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) ==
4661*fdd8201dSApple OSS Distributions 	    kTaskWqExceededConstrainedThreadLimit);
4662*fdd8201dSApple OSS Distributions 	static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) ==
4663*fdd8201dSApple OSS Distributions 	    kTaskWqExceededTotalThreadLimit);
4664*fdd8201dSApple OSS Distributions 	static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
4665*fdd8201dSApple OSS Distributions 	static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT |
4666*fdd8201dSApple OSS Distributions 	    WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7);
4667*fdd8201dSApple OSS Distributions 
4668*fdd8201dSApple OSS Distributions 	if (v == NULL) {
4669*fdd8201dSApple OSS Distributions 		return 0;
4670*fdd8201dSApple OSS Distributions 	}
4671*fdd8201dSApple OSS Distributions 
4672*fdd8201dSApple OSS Distributions 	proc_t p = v;
4673*fdd8201dSApple OSS Distributions 	struct workqueue *wq = proc_get_wqptr(p);
4674*fdd8201dSApple OSS Distributions 
4675*fdd8201dSApple OSS Distributions 	if (wq == NULL || workq_lock_is_acquired_kdp(wq)) {
4676*fdd8201dSApple OSS Distributions 		return 0;
4677*fdd8201dSApple OSS Distributions 	}
4678*fdd8201dSApple OSS Distributions 
4679*fdd8201dSApple OSS Distributions 	uint32_t pwq_state = WQ_FLAGS_AVAILABLE;
4680*fdd8201dSApple OSS Distributions 
4681*fdd8201dSApple OSS Distributions 	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
4682*fdd8201dSApple OSS Distributions 		pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
4683*fdd8201dSApple OSS Distributions 	}
4684*fdd8201dSApple OSS Distributions 
4685*fdd8201dSApple OSS Distributions 	if (wq->wq_nthreads >= wq_max_threads) {
4686*fdd8201dSApple OSS Distributions 		pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
4687*fdd8201dSApple OSS Distributions 	}
4688*fdd8201dSApple OSS Distributions 
4689*fdd8201dSApple OSS Distributions 	return pwq_state;
4690*fdd8201dSApple OSS Distributions }
4691*fdd8201dSApple OSS Distributions 
4692*fdd8201dSApple OSS Distributions void
workq_init(void)4693*fdd8201dSApple OSS Distributions workq_init(void)
4694*fdd8201dSApple OSS Distributions {
4695*fdd8201dSApple OSS Distributions 	clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
4696*fdd8201dSApple OSS Distributions 	    NSEC_PER_USEC, &wq_stalled_window.abstime);
4697*fdd8201dSApple OSS Distributions 	clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,
4698*fdd8201dSApple OSS Distributions 	    NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
4699*fdd8201dSApple OSS Distributions 	clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs,
4700*fdd8201dSApple OSS Distributions 	    NSEC_PER_USEC, &wq_max_timer_interval.abstime);
4701*fdd8201dSApple OSS Distributions 
4702*fdd8201dSApple OSS Distributions 	thread_deallocate_daemon_register_queue(&workq_deallocate_queue,
4703*fdd8201dSApple OSS Distributions 	    workq_deallocate_queue_invoke);
4704*fdd8201dSApple OSS Distributions }
4705