/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* Copyright (c) 1995-2018 Apple, Inc. All Rights Reserved */

#include <sys/cdefs.h>

#include <kern/assert.h>
#include <kern/ast.h>
#include <kern/clock.h>
#include <kern/cpu_data.h>
#include <kern/kern_types.h>
#include <kern/policy_internal.h>
#include <kern/processor.h>
#include <kern/sched_prim.h>    /* for thread_exception_return */
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/thread_group.h>
#include <kern/zalloc.h>
#include <mach/kern_return.h>
#include <mach/mach_param.h>
#include <mach/mach_port.h>
#include <mach/mach_types.h>
#include <mach/mach_vm.h>
#include <mach/sync_policy.h>
#include <mach/task.h>
#include <mach/thread_act.h>    /* for thread_resume */
#include <mach/thread_policy.h>
#include <mach/thread_status.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <machine/atomic.h>
#include <machine/machine_routines.h>
#include <machine/smp.h>
#include <vm/vm_map.h>
#include <vm/vm_protos.h>

#include <sys/eventvar.h>
#include <sys/kdebug.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/param.h>
#include <sys/proc_info.h>      /* for fill_procworkqueue */
#include <sys/proc_internal.h>
#include <sys/pthread_shims.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/ulock.h>          /* for ulock_owner_value_to_port_name */

#include <pthread/bsdthread_private.h>
#include <pthread/workqueue_syscalls.h>
#include <pthread/workqueue_internal.h>
#include <pthread/workqueue_trace.h>

#include <os/log.h>

static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2;
static void workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags);

static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req);

static uint32_t workq_constrained_allowance(struct workqueue *wq,
    thread_qos_t at_qos, struct uthread *uth, bool may_start_timer);

static bool _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq);

static bool workq_thread_is_busy(uint64_t cur_ts,
    _Atomic uint64_t *lastblocked_tsp);

static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS;

static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags);

static inline void
workq_lock_spin(struct workqueue *wq);

static inline void
workq_unlock(struct workqueue *wq);

#pragma mark globals

struct workq_usec_var {
    uint32_t usecs;
    uint64_t abstime;
};

#define WORKQ_SYSCTL_USECS(var, init) \
    static struct workq_usec_var var = { .usecs = init }; \
    SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
        CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
        workq_sysctl_handle_usecs, "I", "")

static LCK_GRP_DECLARE(workq_lck_grp, "workq");
os_refgrp_decl(static, workq_refgrp, "workq", NULL);

static ZONE_DEFINE(workq_zone_workqueue, "workq.wq",
    sizeof(struct workqueue), ZC_NONE);
static ZONE_DEFINE(workq_zone_threadreq, "workq.threadreq",
    sizeof(struct workq_threadreq_s), ZC_CACHING);

static struct mpsc_daemon_queue workq_deallocate_queue;

WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS);
static uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
static uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8;
static uint32_t wq_init_constrained_limit = 1;
static uint16_t wq_death_max_load;
static uint32_t wq_max_parallelism[WORKQ_NUM_QOS_BUCKETS];
/*
 * This is not a hard limit but the max size we want to aim to hit across the
 * entire cooperative pool. We can oversubscribe the pool due to non-cooperative
 * workers, and the max we will oversubscribe the pool by is a total of
 * wq_max_cooperative_threads * WORKQ_NUM_QOS_BUCKETS.
 */
static uint32_t wq_max_cooperative_threads;

static inline uint32_t
wq_cooperative_queue_max_size(struct workqueue *wq)
{
    return wq->wq_cooperative_queue_has_limited_max_size ? 1 : wq_max_cooperative_threads;
}

#pragma mark sysctls

static int
workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS
{
#pragma unused(arg2)
    struct workq_usec_var *v = arg1;
    int error = sysctl_handle_int(oidp, &v->usecs, 0, req);
    if (error || !req->newptr) {
        return error;
    }
    clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC,
        &v->abstime);
    return 0;
}
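
/*
 * Illustrative note: a write such as `sysctl kern.wq_stalled_window_usecs=200`
 * lands in the handler above, which stores the new value in the .usecs field
 * and recomputes the matching .abstime value that the timing checks in this
 * file compare against.
 */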

SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_threads, 0, "");

SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_constrained_threads, 0, "");

static int
wq_limit_cooperative_threads_for_proc SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2, oidp)
    int input_pool_size = 0;
    int changed;
    int error = 0;

    error = sysctl_io_number(req, 0, sizeof(int), &input_pool_size, &changed);
    if (error || !changed) {
        return error;
    }

#define WQ_COOPERATIVE_POOL_SIZE_DEFAULT 0
#define WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS -1
    /* Not available currently, but the sysctl interface is designed to allow
     * these extra parameters:
     * WQ_COOPERATIVE_POOL_SIZE_STRICT : -2 (across all buckets)
     * WQ_COOPERATIVE_POOL_SIZE_CUSTOM : [1, 512]
     */

    if (input_pool_size != WQ_COOPERATIVE_POOL_SIZE_DEFAULT
        && input_pool_size != WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS) {
        error = EINVAL;
        goto out;
    }

    proc_t p = req->p;
    struct workqueue *wq = proc_get_wqptr(p);

    if (wq != NULL) {
        workq_lock_spin(wq);
        if (wq->wq_reqcount > 0 || wq->wq_nthreads > 0) {
            // Hackily enforce that the workqueue is still new (no requests or
            // threads)
            error = ENOTSUP;
        } else {
            wq->wq_cooperative_queue_has_limited_max_size = (input_pool_size == WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS);
        }
        workq_unlock(wq);
    } else {
        /* This process has no workqueue, calling this sysctl makes no sense */
        return ENOTSUP;
    }

out:
    return error;
}

SYSCTL_PROC(_kern, OID_AUTO, wq_limit_cooperative_threads,
    CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_WR | CTLFLAG_LOCKED | CTLTYPE_INT, 0, 0,
    wq_limit_cooperative_threads_for_proc,
    "I", "Modify the max pool size of the cooperative pool");

#pragma mark p_wqptr

#define WQPTR_IS_INITING_VALUE ((struct workqueue *)~(uintptr_t)0)

static struct workqueue *
proc_get_wqptr_fast(struct proc *p)
{
    return os_atomic_load(&p->p_wqptr, relaxed);
}

struct workqueue *
proc_get_wqptr(struct proc *p)
{
    struct workqueue *wq = proc_get_wqptr_fast(p);
    return wq == WQPTR_IS_INITING_VALUE ? NULL : wq;
}

static void
proc_set_wqptr(struct proc *p, struct workqueue *wq)
{
    wq = os_atomic_xchg(&p->p_wqptr, wq, release);
    if (wq == WQPTR_IS_INITING_VALUE) {
        proc_lock(p);
        thread_wakeup(&p->p_wqptr);
        proc_unlock(p);
    }
}

static bool
proc_init_wqptr_or_wait(struct proc *p)
{
    struct workqueue *wq;

    proc_lock(p);
    wq = os_atomic_load(&p->p_wqptr, relaxed);

    if (wq == NULL) {
        os_atomic_store(&p->p_wqptr, WQPTR_IS_INITING_VALUE, relaxed);
        proc_unlock(p);
        return true;
    }

    if (wq == WQPTR_IS_INITING_VALUE) {
        assert_wait(&p->p_wqptr, THREAD_UNINT);
        proc_unlock(p);
        thread_block(THREAD_CONTINUE_NULL);
    } else {
        proc_unlock(p);
    }
    return false;
}
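
/*
 * Usage sketch (inferred from the call sites): a caller that gets `true` back
 * owns initialization and must publish the resulting workqueue (or NULL on
 * failure) with proc_set_wqptr(), which wakes any waiters parked above; a
 * caller that gets `false` back re-checks proc_get_wqptr() and retries.
 */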

static inline event_t
workq_parked_wait_event(struct uthread *uth)
{
    return (event_t)&uth->uu_workq_stackaddr;
}

static inline void
workq_thread_wakeup(struct uthread *uth)
{
    thread_wakeup_thread(workq_parked_wait_event(uth), get_machthread(uth));
}

#pragma mark wq_thactive

#if defined(__LP64__)
// Layout is:
// 127 - 115 : 13 bits of zeroes
// 114 - 112 : best QoS among all pending constrained requests
// 111 -   0 : MGR, AUI, UI, IN, DF, UT, BG+MT buckets every 16 bits
#define WQ_THACTIVE_BUCKET_WIDTH 16
#define WQ_THACTIVE_QOS_SHIFT    (7 * WQ_THACTIVE_BUCKET_WIDTH)
#else
// Layout is:
// 63 - 61 : best QoS among all pending constrained requests
// 60      : Manager bucket (0 or 1)
// 59 -  0 : AUI, UI, IN, DF, UT, BG+MT buckets every 10 bits
#define WQ_THACTIVE_BUCKET_WIDTH 10
#define WQ_THACTIVE_QOS_SHIFT    (6 * WQ_THACTIVE_BUCKET_WIDTH + 1)
#endif
#define WQ_THACTIVE_BUCKET_MASK  ((1U << WQ_THACTIVE_BUCKET_WIDTH) - 1)
#define WQ_THACTIVE_BUCKET_HALF  (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))

static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
    "Make sure we have space to encode a QoS");
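
/*
 * Illustrative sketch (not a helper the code uses): reading the active
 * count of a single QoS bucket out of a wq_thactive_t snapshot `v`:
 *
 *     uint32_t active = (uint32_t)(v >> (_wq_bucket(qos) *
 *         WQ_THACTIVE_BUCKET_WIDTH)) & WQ_THACTIVE_BUCKET_MASK;
 */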

static inline wq_thactive_t
_wq_thactive(struct workqueue *wq)
{
    return os_atomic_load_wide(&wq->wq_thactive, relaxed);
}

static inline uint8_t
_wq_bucket(thread_qos_t qos)
{
    // Map both BG and MT to the same bucket by over-shifting down and
    // clamping MT and BG together.
    switch (qos) {
    case THREAD_QOS_MAINTENANCE:
        return 0;
    default:
        return qos - 2;
    }
}
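
/*
 * For example, with the standard thread_qos_t values: MAINTENANCE (1) and
 * BACKGROUND (2) both map to bucket 0, UTILITY (3) to bucket 1,
 * LEGACY/default (4) to bucket 2, USER_INITIATED (5) to bucket 3, and
 * USER_INTERACTIVE (6) to bucket 4.
 */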

#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
    ((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))

static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
{
    // Avoid expensive atomic operations: the three bits we're loading are in
    // a single byte, and always updated under the workqueue lock
    wq_thactive_t v = *(wq_thactive_t *)&wq->wq_thactive;
    return WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(v);
}

static void
_wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq)
{
    thread_qos_t old_qos, new_qos;
    workq_threadreq_t req;

    req = priority_queue_max(&wq->wq_constrained_queue,
        struct workq_threadreq_s, tr_entry);
    new_qos = req ? req->tr_qos : THREAD_QOS_UNSPECIFIED;
    old_qos = _wq_thactive_best_constrained_req_qos(wq);
    if (old_qos != new_qos) {
        long delta = (long)new_qos - (long)old_qos;
        wq_thactive_t v = (wq_thactive_t)delta << WQ_THACTIVE_QOS_SHIFT;
        /*
         * We can do an atomic add relative to the initial load because updates
         * to this qos are always serialized under the workqueue lock.
         */
        v = os_atomic_add(&wq->wq_thactive, v, relaxed);
#ifdef __LP64__
        WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v,
            (uint64_t)(v >> 64), 0);
#else
        WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0);
#endif
    }
}
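
/*
 * Worked example for the delta trick above: moving the best constrained
 * request QoS from THREAD_QOS_UTILITY (3) to THREAD_QOS_USER_INITIATED (5)
 * atomically adds (5 - 3) << WQ_THACTIVE_QOS_SHIFT, which updates the QoS
 * field in the top bits without disturbing any per-bucket counter below it.
 */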

static inline wq_thactive_t
_wq_thactive_offset_for_qos(thread_qos_t qos)
{
    uint8_t bucket = _wq_bucket(qos);
    __builtin_assume(bucket < WORKQ_NUM_BUCKETS);
    return (wq_thactive_t)1 << (bucket * WQ_THACTIVE_BUCKET_WIDTH);
}

static inline wq_thactive_t
_wq_thactive_inc(struct workqueue *wq, thread_qos_t qos)
{
    wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
    return os_atomic_add_orig(&wq->wq_thactive, v, relaxed);
}

static inline wq_thactive_t
_wq_thactive_dec(struct workqueue *wq, thread_qos_t qos)
{
    wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
    return os_atomic_sub_orig(&wq->wq_thactive, v, relaxed);
}

static inline void
_wq_thactive_move(struct workqueue *wq,
    thread_qos_t old_qos, thread_qos_t new_qos)
{
    wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) -
        _wq_thactive_offset_for_qos(old_qos);
    os_atomic_add(&wq->wq_thactive, v, relaxed);
    wq->wq_thscheduled_count[_wq_bucket(old_qos)]--;
    wq->wq_thscheduled_count[_wq_bucket(new_qos)]++;
}

static inline uint32_t
_wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
    thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
{
    uint32_t count = 0, active;
    uint64_t curtime;

    assert(WORKQ_THREAD_QOS_MIN <= qos && qos <= WORKQ_THREAD_QOS_MAX);

    if (busycount) {
        curtime = mach_absolute_time();
        *busycount = 0;
    }
    if (max_busycount) {
        *max_busycount = THREAD_QOS_LAST - qos;
    }

    uint8_t i = _wq_bucket(qos);
    v >>= i * WQ_THACTIVE_BUCKET_WIDTH;
    for (; i < WORKQ_NUM_QOS_BUCKETS; i++, v >>= WQ_THACTIVE_BUCKET_WIDTH) {
        active = v & WQ_THACTIVE_BUCKET_MASK;
        count += active;

        if (busycount && wq->wq_thscheduled_count[i] > active) {
            if (workq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i])) {
                /*
                 * We only consider the last blocked thread for a given bucket
                 * as busy because we don't want to take the list lock in each
                 * sched callback. However this is an approximation that could
                 * contribute to thread creation storms.
                 */
                (*busycount)++;
            }
        }
    }

    return count;
}
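
/*
 * For instance, calling this with the DF (default) QoS sums the active
 * counts of the DF bucket and every higher bucket, which is how callers
 * ask "how many threads are currently running at or above this QoS".
 */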

/* The input qos here should be the requested QoS of the thread, not accounting
 * for any overrides */
static inline void
_wq_cooperative_queue_scheduled_count_dec(struct workqueue *wq, thread_qos_t qos)
{
    __assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]--;
    assert(old_scheduled_count > 0);
}

/* The input qos here should be the requested QoS of the thread, not accounting
 * for any overrides */
static inline void
_wq_cooperative_queue_scheduled_count_inc(struct workqueue *wq, thread_qos_t qos)
{
    __assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]++;
    assert(old_scheduled_count < UINT8_MAX);
}

#pragma mark wq_flags

static inline uint32_t
_wq_flags(struct workqueue *wq)
{
    return os_atomic_load(&wq->wq_flags, relaxed);
}

static inline bool
_wq_exiting(struct workqueue *wq)
{
    return _wq_flags(wq) & WQ_EXITING;
}

bool
workq_is_exiting(struct proc *p)
{
    struct workqueue *wq = proc_get_wqptr(p);
    return !wq || _wq_exiting(wq);
}


#pragma mark workqueue lock

static bool
workq_lock_is_acquired_kdp(struct workqueue *wq)
{
    return kdp_lck_ticket_is_acquired(&wq->wq_lock);
}

static inline void
workq_lock_spin(struct workqueue *wq)
{
    lck_ticket_lock(&wq->wq_lock, &workq_lck_grp);
}

static inline void
workq_lock_held(struct workqueue *wq)
{
    LCK_TICKET_ASSERT_OWNED(&wq->wq_lock);
}

static inline bool
workq_lock_try(struct workqueue *wq)
{
    return lck_ticket_lock_try(&wq->wq_lock, &workq_lck_grp);
}

static inline void
workq_unlock(struct workqueue *wq)
{
    lck_ticket_unlock(&wq->wq_lock);
}

#pragma mark idle thread lists

#define WORKQ_POLICY_INIT(qos) \
    (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }

static inline thread_qos_t
workq_pri_bucket(struct uu_workq_policy req)
{
    return MAX(MAX(req.qos_req, req.qos_max), req.qos_override);
}

static inline thread_qos_t
workq_pri_override(struct uu_workq_policy req)
{
    return MAX(workq_pri_bucket(req), req.qos_bucket);
}
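
/*
 * In short: the bucket a thread is accounted against is the max of its
 * requested QoS, the kevent-supplied max QoS, and any override; its effective
 * priority additionally never drops below the bucket it currently occupies.
 */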

static inline bool
workq_thread_needs_params_change(workq_threadreq_t req, struct uthread *uth)
{
    workq_threadreq_param_t cur_trp, req_trp = { };

    cur_trp.trp_value = uth->uu_save.uus_workq_park_data.workloop_params;
    if (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
        req_trp = kqueue_threadreq_workloop_param(req);
    }

    /*
     * CPU percent flags are handled separately from policy changes, so ignore
     * them for all of these checks.
     */
    uint16_t cur_flags = (cur_trp.trp_flags & ~TRP_CPUPERCENT);
    uint16_t req_flags = (req_trp.trp_flags & ~TRP_CPUPERCENT);

    if (!req_flags && !cur_flags) {
        return false;
    }

    if (req_flags != cur_flags) {
        return true;
    }

    if ((req_flags & TRP_PRIORITY) && req_trp.trp_pri != cur_trp.trp_pri) {
        return true;
    }

    if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
        return true;
    }

    return false;
}

static inline bool
workq_thread_needs_priority_change(workq_threadreq_t req, struct uthread *uth)
{
    if (workq_thread_needs_params_change(req, uth)) {
        return true;
    }

    if (req->tr_qos != workq_pri_override(uth->uu_workq_pri)) {
        return true;
    }

#if CONFIG_PREADOPT_TG
    thread_group_qos_t tg = kqr_preadopt_thread_group(req);
    if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
        /*
         * Ideally, we'd add a check here to see if the thread's preadopt TG is
         * the same as the thread request's thread group and short circuit if
         * that is the case. But in the interest of keeping the code clean and
         * not taking the thread lock here, we're going to skip this. We will
         * eventually short circuit once we try to set the preadoption thread
         * group on the thread.
         */
        return true;
    }
#endif

    return false;
}

/* Input thread must be self. Called during self override, resetting overrides
 * or while processing kevents
 *
 * Called with workq lock held. Sometimes also the thread mutex
 */
static void
workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth,
    struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
    bool force_run)
{
    assert(uth == current_uthread());

    thread_qos_t old_bucket = old_pri.qos_bucket;
    thread_qos_t new_bucket = workq_pri_bucket(new_pri);

    if (old_bucket != new_bucket) {
        _wq_thactive_move(wq, old_bucket, new_bucket);
    }

    new_pri.qos_bucket = new_bucket;
    uth->uu_workq_pri = new_pri;

    if (old_pri.qos_override != new_pri.qos_override) {
        thread_set_workq_override(get_machthread(uth), new_pri.qos_override);
    }

    if (wq->wq_reqcount && (old_bucket > new_bucket || force_run)) {
        int flags = WORKQ_THREADREQ_CAN_CREATE_THREADS;
        if (old_bucket > new_bucket) {
            /*
             * When lowering our bucket, we may unblock a thread request,
             * but we can't drop our priority before we have evaluated
             * whether this is the case, and if we ever drop the workqueue lock
             * that would cause a priority inversion.
             *
             * We hence have to disallow thread creation in that case.
             */
            flags = 0;
        }
        workq_schedule_creator(p, wq, flags);
    }
}

/*
 * Sets/resets the cpu percent limits on the current thread. We can't set
 * these limits from outside of the current thread, so this function needs
 * to be called when we're executing on the intended thread.
 */
static void
workq_thread_reset_cpupercent(workq_threadreq_t req, struct uthread *uth)
{
    assert(uth == current_uthread());
    workq_threadreq_param_t trp = { };

    if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
        trp = kqueue_threadreq_workloop_param(req);
    }

    if (uth->uu_workq_flags & UT_WORKQ_CPUPERCENT) {
        /*
         * Going through disable when we have an existing CPU percent limit
         * set will force the ledger to refill the token bucket of the current
         * thread, removing any penalty applied by previous thread use.
         */
        thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
        uth->uu_workq_flags &= ~UT_WORKQ_CPUPERCENT;
    }

    if (trp.trp_flags & TRP_CPUPERCENT) {
        thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent,
            (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
        uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT;
    }
}

/* Called with the workq lock held */
static void
workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req, bool unpark)
{
    thread_t th = get_machthread(uth);
    thread_qos_t qos = req ? req->tr_qos : WORKQ_THREAD_QOS_CLEANUP;
    workq_threadreq_param_t trp = { };
    int priority = 31;
    int policy = POLICY_TIMESHARE;

    if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
        trp = kqueue_threadreq_workloop_param(req);
    }

    uth->uu_workq_pri = WORKQ_POLICY_INIT(qos);
    uth->uu_workq_flags &= ~UT_WORKQ_OUTSIDE_QOS;

    if (unpark) {
        uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
        // qos sent out to userspace (may differ from uu_workq_pri on param threads)
        uth->uu_save.uus_workq_park_data.qos = qos;
    }

    if (qos == WORKQ_THREAD_QOS_MANAGER) {
        uint32_t mgr_pri = wq->wq_event_manager_priority;
        assert(trp.trp_value == 0); // manager qos and thread policy don't mix

        if (_pthread_priority_has_sched_pri(mgr_pri)) {
            mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
            thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri,
                POLICY_TIMESHARE);
            return;
        }

        qos = _pthread_priority_thread_qos(mgr_pri);
    } else {
        if (trp.trp_flags & TRP_PRIORITY) {
            qos = THREAD_QOS_UNSPECIFIED;
            priority = trp.trp_pri;
            uth->uu_workq_flags |= UT_WORKQ_OUTSIDE_QOS;
        }

        if (trp.trp_flags & TRP_POLICY) {
            policy = trp.trp_pol;
        }
    }

#if CONFIG_PREADOPT_TG
    if (req && (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP)) {
        /*
         * For a kqwl permanently configured with a thread group, we can safely
         * borrow a +1 ref from kqwl_preadopt_tg. A thread then takes an
         * additional +1 ref for itself via thread_set_preadopt_thread_group.
         *
         * In all other cases, we cannot safely read and borrow the reference
         * from the kqwl since it can disappear from under us at any time due
         * to the max-ing logic in kqueue_set_preadopted_thread_group.
         *
         * As such, we do the following dance:
         *
         * 1) cmpxchg and steal the kqwl's preadopt thread group, leaving
         *    (NULL + QoS) behind. At this point, we have the reference
         *    to the thread group from the kqwl.
         * 2) Have the thread set the preadoption thread group on itself.
         * 3) cmpxchg from (NULL + QoS), which we set earlier in (1), back to
         *    thread_group + QoS, i.e. we try to give the reference back to the
         *    kqwl. If we fail, that's because a higher QoS thread group was
         *    set on the kqwl in kqueue_set_preadopted_thread_group, in which
         *    case we need to go back to (1).
         */

        _Atomic(struct thread_group *) *tg_loc = kqr_preadopt_thread_group_addr(req);

        thread_group_qos_t old_tg, new_tg;
        int ret = 0;
again:
        ret = os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
            if ((!KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) ||
                KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
                os_atomic_rmw_loop_give_up(break);
            }

            /*
             * Leave the QoS behind - kqueue_set_preadopted_thread_group will
             * only modify it if there is a higher QoS thread group to attach
             */
            new_tg = (thread_group_qos_t)((uintptr_t)old_tg & KQWL_PREADOPT_TG_QOS_MASK);
        });

        if (ret) {
            /*
             * We successfully took the ref from the kqwl so set it on the
             * thread now
             */
            thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));

            thread_group_qos_t thread_group_to_expect = new_tg;
            thread_group_qos_t thread_group_to_set = old_tg;

            os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
                if (old_tg != thread_group_to_expect) {
                    /*
                     * There was an intervening write to the kqwl_preadopt_tg,
                     * and it has a higher QoS than what we are working with
                     * here. Abandon our current adopted thread group and redo
                     * the full dance
                     */
                    thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(thread_group_to_set));
                    os_atomic_rmw_loop_give_up(goto again);
                }

                new_tg = thread_group_to_set;
            });
        } else {
            if (KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
                thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));
            } else {
                /* Nothing valid on the kqwl, just clear what's on the thread */
                thread_set_preadopt_thread_group(th, NULL);
            }
        }
    } else {
        /* Not even a kqwl, clear what's on the thread */
        thread_set_preadopt_thread_group(th, NULL);
    }
#endif
    thread_set_workq_pri(th, qos, priority, policy);
}

/*
 * Called by kevent with the NOTE_WL_THREAD_REQUEST knote lock held,
 * every time a servicer is being told about a new max QoS.
 */
void
workq_thread_set_max_qos(struct proc *p, workq_threadreq_t kqr)
{
    struct uu_workq_policy old_pri, new_pri;
    struct uthread *uth = current_uthread();
    struct workqueue *wq = proc_get_wqptr_fast(p);
    thread_qos_t qos = kqr->tr_kq_qos_index;

    if (uth->uu_workq_pri.qos_max == qos) {
        return;
    }

    workq_lock_spin(wq);
    old_pri = new_pri = uth->uu_workq_pri;
    new_pri.qos_max = qos;
    workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
    workq_unlock(wq);
}

#pragma mark idle threads accounting and handling

static inline struct uthread *
workq_oldest_killable_idle_thread(struct workqueue *wq)
{
    struct uthread *uth = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);

    if (uth && !uth->uu_save.uus_workq_park_data.has_stack) {
        uth = TAILQ_PREV(uth, workq_uthread_head, uu_workq_entry);
        if (uth) {
            assert(uth->uu_save.uus_workq_park_data.has_stack);
        }
    }
    return uth;
}
846*94d3b452SApple OSS Distributions
847*94d3b452SApple OSS Distributions static inline uint64_t
workq_kill_delay_for_idle_thread(struct workqueue * wq)848*94d3b452SApple OSS Distributions workq_kill_delay_for_idle_thread(struct workqueue *wq)
849*94d3b452SApple OSS Distributions {
850*94d3b452SApple OSS Distributions uint64_t delay = wq_reduce_pool_window.abstime;
851*94d3b452SApple OSS Distributions uint16_t idle = wq->wq_thidlecount;
852*94d3b452SApple OSS Distributions
	/*
	 * If we have at most wq_death_max_load idle threads, use a 5s timer.
	 *
	 * For the next wq_max_constrained_threads ones, decay linearly from
	 * 5s down to 50ms.
	 */
	if (idle <= wq_death_max_load) {
		return delay;
	}

	if (wq_max_constrained_threads > idle - wq_death_max_load) {
		delay *= (wq_max_constrained_threads - (idle - wq_death_max_load));
	}
	return delay / wq_max_constrained_threads;
}
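
/*
 * Worked example for the decay above (sketch; assumes the default 5s
 * wq_reduce_pool_window and illustrative values wq_death_max_load = 5,
 * wq_max_constrained_threads = 100, none of which are guaranteed here):
 *
 *	idle =   5  ->  5s                       (at or below wq_death_max_load)
 *	idle =   6  ->  5s *  99 / 100 = 4.95s   (linear decay begins)
 *	idle =  55  ->  5s *  50 / 100 = 2.5s
 *	idle = 104  ->  5s *   1 / 100 = 50ms
 *	idle >= 105 ->  5s / 100       = 50ms    (the multiplier no longer applies)
 */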

static inline bool
workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth,
    uint64_t now)
{
	uint64_t delay = workq_kill_delay_for_idle_thread(wq);
	return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay;
}

static void
workq_death_call_schedule(struct workqueue *wq, uint64_t deadline)
{
	uint32_t wq_flags = os_atomic_load(&wq->wq_flags, relaxed);

	if (wq_flags & (WQ_EXITING | WQ_DEATH_CALL_SCHEDULED)) {
		return;
	}
	os_atomic_or(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);

	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_NONE, wq, 1, 0, 0);

	/*
	 * <rdar://problem/13139182> Due to how long term timers work, the leeway
	 * can't be too short, so use 500ms which is long enough that we will not
	 * wake up the CPU for killing threads, but short enough that it doesn't
	 * fall into long-term timer list shenanigans.
	 */
	thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline,
	    wq_reduce_pool_window.abstime / 10,
	    THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
}
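
/*
 * Note: the leeway passed above is wq_reduce_pool_window.abstime / 10; with
 * the default 5s pool-reduction window that works out to the 500ms the
 * comment refers to (assuming the default window, which is tunable).
 */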

/*
 * `decrement` is set to the number of threads that are no longer dying:
 * - because they have been resuscitated just in time (workq_pop_idle_thread)
 * - or have been killed (workq_thread_terminate).
 */
static void
workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement)
{
	struct uthread *uth;

	assert(wq->wq_thdying_count >= decrement);
	if ((wq->wq_thdying_count -= decrement) > 0) {
		return;
	}

	if (wq->wq_thidlecount <= 1) {
		return;
	}

	if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) {
		return;
	}

	uint64_t now = mach_absolute_time();
	uint64_t delay = workq_kill_delay_for_idle_thread(wq);

	if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) {
		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, wq->wq_thidlecount, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
			workq_thread_wakeup(uth);
		}
		return;
	}

	workq_death_call_schedule(wq,
	    uth->uu_save.uus_workq_park_data.idle_stamp + delay);
}

void
workq_thread_terminate(struct proc *p, struct uthread *uth)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);
	TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
	if (uth->uu_workq_flags & UT_WORKQ_DYING) {
		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END,
		    wq, wq->wq_thidlecount, 0, 0);
		workq_death_policy_evaluate(wq, 1);
	}
	if (wq->wq_nthreads-- == wq_max_threads) {
		/*
		 * We dropped back under the thread limit, which may have been
		 * preventing thread creation; redrive if there are pending requests.
		 */
		if (wq->wq_reqcount) {
			workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
		}
	}
	workq_unlock(wq);

	thread_deallocate(get_machthread(uth));
}

static void
workq_kill_old_threads_call(void *param0, void *param1 __unused)
{
	struct workqueue *wq = param0;

	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_START, wq, 0, 0, 0);
	os_atomic_andnot(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
	workq_death_policy_evaluate(wq, 0);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_END, wq, 0, 0, 0);
	workq_unlock(wq);
}

static struct uthread *
workq_pop_idle_thread(struct workqueue *wq, uint16_t uu_flags,
    bool *needs_wakeup)
{
	struct uthread *uth;

	if ((uth = TAILQ_FIRST(&wq->wq_thidlelist))) {
		TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
	} else {
		uth = TAILQ_FIRST(&wq->wq_thnewlist);
		TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
	}
	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);

	assert((uth->uu_workq_flags & UT_WORKQ_RUNNING) == 0);
	uth->uu_workq_flags |= UT_WORKQ_RUNNING | uu_flags;

	/* A thread is never woken up as part of the cooperative pool */
	assert((uu_flags & UT_WORKQ_COOPERATIVE) == 0);

	if ((uu_flags & UT_WORKQ_OVERCOMMIT) == 0) {
		wq->wq_constrained_threads_scheduled++;
	}
	wq->wq_threads_scheduled++;
	wq->wq_thidlecount--;

	if (__improbable(uth->uu_workq_flags & UT_WORKQ_DYING)) {
		uth->uu_workq_flags ^= UT_WORKQ_DYING;
		workq_death_policy_evaluate(wq, 1);
		*needs_wakeup = false;
	} else if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
		*needs_wakeup = false;
	} else {
		*needs_wakeup = true;
	}
	return uth;
}
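
/*
 * Sketch of the expected caller pattern (illustrative, not a verbatim call
 * site; assumes `wq` is locked and known to have an idle or new thread):
 *
 *	bool needs_wakeup;
 *	struct uthread *uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
 *	    &needs_wakeup);
 *	// ... bind the thread request to uth ...
 *	if (needs_wakeup) {
 *		workq_thread_wakeup(uth);
 *	}
 *
 * Dying or cleanup-bound threads come back with `needs_wakeup` false because
 * such threads do not need a separate wakeup.
 */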

/*
 * Called by thread_create_workq_waiting() during thread initialization, before
 * assert_wait, before the thread has been started.
 */
event_t
workq_thread_init_and_wq_lock(task_t task, thread_t th)
{
	struct uthread *uth = get_bsdthread_info(th);

	uth->uu_workq_flags = UT_WORKQ_NEW;
	uth->uu_workq_pri = WORKQ_POLICY_INIT(THREAD_QOS_LEGACY);
	uth->uu_workq_thport = MACH_PORT_NULL;
	uth->uu_workq_stackaddr = 0;
	uth->uu_workq_pthread_kill_allowed = 0;

	thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
	thread_reset_workq_qos(th, THREAD_QOS_LEGACY);

	workq_lock_spin(proc_get_wqptr_fast(get_bsdtask_info(task)));
	return workq_parked_wait_event(uth);
}

/**
 * Try to add a new workqueue thread.
 *
 * - called with workq lock held
 * - dropped and retaken around thread creation
 * - return with workq lock held
 */
static bool
workq_add_new_idle_thread(proc_t p, struct workqueue *wq)
{
	mach_vm_offset_t th_stackaddr;
	kern_return_t kret;
	thread_t th;

	wq->wq_nthreads++;

	workq_unlock(wq);

	vm_map_t vmap = get_task_map(proc_task(p));

	kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 1, 0);
		goto out;
	}

	kret = thread_create_workq_waiting(proc_task(p), workq_unpark_continue, &th);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 0, 0);
		pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr);
		goto out;
	}

	// thread_create_workq_waiting() will return with the wq lock held
	// on success, because it calls workq_thread_init_and_wq_lock() above

	struct uthread *uth = get_bsdthread_info(th);

	wq->wq_creations++;
	wq->wq_thidlecount++;
	uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;
	TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);

	WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0);
	return true;

out:
	workq_lock_spin(wq);
	/*
	 * Do not redrive here even if we went back under wq_max_threads:
	 * it is the responsibility of this function's callers to do so
	 * when it fails.
	 */
	wq->wq_nthreads--;
	return false;
}
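
/*
 * Illustrative use (hypothetical caller, not a verbatim call site): because
 * the lock is dropped and retaken around thread creation, counters read
 * before the call may be stale afterwards, so callers typically re-check
 * their condition in a loop:
 *
 *	workq_lock_spin(wq);
 *	while (wq->wq_thidlecount == 0) {
 *		if (!workq_add_new_idle_thread(p, wq)) {
 *			break;	// lock is held again; caller redrives on failure
 *		}
 *	}
 *	// ... proceed with the lock held ...
 */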

static inline bool
workq_thread_is_overcommit(struct uthread *uth)
{
	return (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0;
}

static inline bool
workq_thread_is_nonovercommit(struct uthread *uth)
{
	return (uth->uu_workq_flags & (UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE)) == 0;
}

static inline bool
workq_thread_is_cooperative(struct uthread *uth)
{
	return (uth->uu_workq_flags & UT_WORKQ_COOPERATIVE) != 0;
}

static inline void
workq_thread_set_type(struct uthread *uth, uint16_t flags)
{
	uth->uu_workq_flags &= ~(UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
	uth->uu_workq_flags |= flags;
}

#define WORKQ_UNPARK_FOR_DEATH_WAS_IDLE 0x1

__attribute__((noreturn, noinline))
static void
workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t death_flags, uint32_t setup_flags)
{
	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
	bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW;

	if (qos > WORKQ_THREAD_QOS_CLEANUP) {
		workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
		qos = WORKQ_THREAD_QOS_CLEANUP;
	}

	workq_thread_reset_cpupercent(NULL, uth);

	if (death_flags & WORKQ_UNPARK_FOR_DEATH_WAS_IDLE) {
		wq->wq_thidlecount--;
		if (first_use) {
			TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
		} else {
			TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
		}
	}
	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);

	workq_unlock(wq);

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t flags = WQ_FLAG_THREAD_NEWSPI | qos | WQ_FLAG_THREAD_PRIO_QOS;
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (!first_use) {
		flags |= WQ_FLAG_THREAD_REUSE;
	}

	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, flags);
	__builtin_unreachable();
}

bool
workq_is_current_thread_updating_turnstile(struct workqueue *wq)
{
	return wq->wq_turnstile_updater == current_thread();
}

__attribute__((always_inline))
static inline void
workq_perform_turnstile_operation_locked(struct workqueue *wq,
    void (^operation)(void))
{
	workq_lock_held(wq);
	wq->wq_turnstile_updater = current_thread();
	operation();
	wq->wq_turnstile_updater = THREAD_NULL;
}
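
/*
 * Usage note: any turnstile operation on wq_turnstile is wrapped this way so
 * that workq_is_current_thread_updating_turnstile() can tell whether the
 * current thread is inside such an update; see
 * workq_turnstile_update_inheritor() below for a canonical use.
 */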

static void
workq_turnstile_update_inheritor(struct workqueue *wq,
    turnstile_inheritor_t inheritor,
    turnstile_update_flags_t flags)
{
	if (wq->wq_inheritor == inheritor) {
		return;
	}
	wq->wq_inheritor = inheritor;
	workq_perform_turnstile_operation_locked(wq, ^{
		turnstile_update_inheritor(wq->wq_turnstile, inheritor,
		    flags | TURNSTILE_IMMEDIATE_UPDATE);
		turnstile_update_inheritor_complete(wq->wq_turnstile,
		    TURNSTILE_INTERLOCK_HELD);
	});
}

static void
workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth,
    uint32_t setup_flags)
{
	uint64_t now = mach_absolute_time();
	bool is_creator = (uth == wq->wq_creator);

	if (workq_thread_is_cooperative(uth)) {
		assert(!is_creator);

		thread_qos_t thread_qos = uth->uu_workq_pri.qos_req;
		_wq_cooperative_queue_scheduled_count_dec(wq, thread_qos);

		/*
		 * Before we get here, we always go through
		 * workq_select_threadreq_or_park_and_unlock. If we got here, it
		 * means we already went through the logic in
		 * workq_threadreq_select, which refreshed the next best
		 * cooperative qos while excluding the current thread - we
		 * shouldn't need to do it again.
		 */
		assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
	} else if (workq_thread_is_nonovercommit(uth)) {
		assert(!is_creator);

		wq->wq_constrained_threads_scheduled--;
	}

	uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
	TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
	wq->wq_threads_scheduled--;

	if (is_creator) {
		wq->wq_creator = NULL;
		WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
		    uth->uu_save.uus_workq_park_data.yields);
	}

	if (wq->wq_inheritor == get_machthread(uth)) {
		assert(wq->wq_creator == NULL);
		if (wq->wq_reqcount) {
			workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
		} else {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
	}

	if (uth->uu_workq_flags & UT_WORKQ_NEW) {
		assert(is_creator || (_wq_flags(wq) & WQ_EXITING));
		TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
		wq->wq_thidlecount++;
		return;
	}

	if (!is_creator) {
		_wq_thactive_dec(wq, uth->uu_workq_pri.qos_bucket);
		wq->wq_thscheduled_count[_wq_bucket(uth->uu_workq_pri.qos_bucket)]--;
		uth->uu_workq_flags |= UT_WORKQ_IDLE_CLEANUP;
	}

	uth->uu_save.uus_workq_park_data.idle_stamp = now;

	struct uthread *oldest = workq_oldest_killable_idle_thread(wq);
	uint16_t cur_idle = wq->wq_thidlecount;

	if (cur_idle >= wq_max_constrained_threads ||
	    (wq->wq_thdying_count == 0 && oldest &&
	    workq_should_kill_idle_thread(wq, oldest, now))) {
		/*
		 * Immediately kill threads if we have too many of them,
		 * and trade places with the oldest one we would have woken up:
		 * this is a relatively desperate situation where we really
		 * need to kill threads quickly, and it's better to kill the
		 * one that's currently on core than to context switch to the
		 * oldest idle thread.
		 */
		if (oldest) {
			oldest->uu_save.uus_workq_park_data.idle_stamp = now;
			TAILQ_REMOVE(&wq->wq_thidlelist, oldest, uu_workq_entry);
			TAILQ_INSERT_HEAD(&wq->wq_thidlelist, oldest, uu_workq_entry);
		}

		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, cur_idle, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
		workq_unpark_for_death_and_unlock(p, wq, uth, 0, setup_flags);
		__builtin_unreachable();
	}

	struct uthread *tail = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);

	cur_idle += 1;
	wq->wq_thidlecount = cur_idle;

	if (cur_idle >= wq_death_max_load && tail &&
	    tail->uu_save.uus_workq_park_data.has_stack) {
		uth->uu_save.uus_workq_park_data.has_stack = false;
		TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry);
	} else {
		uth->uu_save.uus_workq_park_data.has_stack = true;
		TAILQ_INSERT_HEAD(&wq->wq_thidlelist, uth, uu_workq_entry);
	}

	if (!tail) {
		uint64_t delay = workq_kill_delay_for_idle_thread(wq);
		workq_death_call_schedule(wq, now + delay);
	}
}
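
/*
 * Idle-list ordering invariant (inferred from the insertion logic above):
 * a thread parks without its stack only when the current tail still has a
 * stack, so at most one stackless thread ever sits at the tail of
 * wq_thidlelist. This is what lets workq_oldest_killable_idle_thread()
 * step back at most one entry to find a killable thread with a stack.
 */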

#pragma mark thread requests

static inline bool
workq_tr_is_overcommit(workq_tr_flags_t tr_flags)
{
	return (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) != 0;
}

static inline bool
workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)
{
	return (tr_flags & (WORKQ_TR_FLAG_OVERCOMMIT | WORKQ_TR_FLAG_COOPERATIVE)) == 0;
}

static inline bool
workq_tr_is_cooperative(workq_tr_flags_t tr_flags)
{
	return (tr_flags & WORKQ_TR_FLAG_COOPERATIVE) != 0;
}

#define workq_threadreq_is_overcommit(req) workq_tr_is_overcommit((req)->tr_flags)
#define workq_threadreq_is_nonovercommit(req) workq_tr_is_nonovercommit((req)->tr_flags)
#define workq_threadreq_is_cooperative(req) workq_tr_is_cooperative((req)->tr_flags)

static inline int
workq_priority_for_req(workq_threadreq_t req)
{
	thread_qos_t qos = req->tr_qos;

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
		assert(trp.trp_flags & TRP_PRIORITY);
		return trp.trp_pri;
	}
	return thread_workq_pri_for_qos(qos);
}

static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
{
	assert(!workq_tr_is_cooperative(req->tr_flags));

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		return &wq->wq_special_queue;
	} else if (workq_tr_is_overcommit(req->tr_flags)) {
		return &wq->wq_overcommit_queue;
	} else {
		return &wq->wq_constrained_queue;
	}
}

/* Calculates the number of cooperative threads scheduled at or above the input QoS */
static uint64_t
workq_num_cooperative_threads_scheduled_to_qos(struct workqueue *wq, thread_qos_t qos)
{
	workq_lock_held(wq);

	uint64_t num_cooperative_threads = 0;

	for (thread_qos_t cur_qos = WORKQ_THREAD_QOS_MAX; cur_qos >= qos; cur_qos--) {
		uint8_t bucket = _wq_bucket(cur_qos);
		num_cooperative_threads += wq->wq_cooperative_queue_scheduled_count[bucket];
	}

	return num_cooperative_threads;
}

static uint64_t
workq_num_cooperative_threads_scheduled_total(struct workqueue *wq)
{
	return workq_num_cooperative_threads_scheduled_to_qos(wq, WORKQ_THREAD_QOS_MIN);
}

#if DEBUG || DEVELOPMENT
static bool
workq_has_cooperative_thread_requests(struct workqueue *wq)
{
	for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
		uint8_t bucket = _wq_bucket(qos);
		if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
			return true;
		}
	}

	return false;
}
#endif

/*
 * Determines the next QoS bucket to service in the cooperative pool. This
 * function always returns a QoS for the cooperative pool as long as there are
 * requests to be serviced.
 *
 * Unlike the other thread pools, for the cooperative pool the scheduled
 * counts of the various buckets affect what its next best request is.
 *
 * This function is called in the following contexts:
 *
 * a) When determining the best thread QoS for the cooperative bucket for the
 * creator/thread reuse
 *
 * b) Once (a) has happened and a thread has bound to a thread request, to
 * figure out whether the next best request for this pool has changed, so
 * that the creator can be scheduled.
 *
 * Returns true if the cooperative queue's best qos changed from its previous
 * value.
 */
static bool
_wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq)
{
	workq_lock_held(wq);

	thread_qos_t old_best_req_qos = wq->wq_cooperative_queue_best_req_qos;

	/*
	 * We determine the next best cooperative thread request based on the
	 * following:
	 *
	 * 1. Take the MAX of the following:
	 *    a) Highest qos with pending TRs such that the number of threads
	 *       scheduled at >= qos is < wq_max_cooperative_threads
	 *    b) Highest qos bucket with pending TRs but no scheduled threads
	 *       for that bucket
	 *
	 * 2. If the result of (1) is UN, then we pick the highest QoS amongst
	 *    pending thread requests in the pool.
	 */
	thread_qos_t highest_qos_with_no_scheduled = THREAD_QOS_UNSPECIFIED;
	thread_qos_t highest_qos_req_with_width = THREAD_QOS_UNSPECIFIED;

	thread_qos_t highest_qos_req = THREAD_QOS_UNSPECIFIED;

	int scheduled_count_till_qos = 0;

	for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
		uint8_t bucket = _wq_bucket(qos);
		uint8_t scheduled_count_for_bucket = wq->wq_cooperative_queue_scheduled_count[bucket];
		scheduled_count_till_qos += scheduled_count_for_bucket;

		if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
			if (qos > highest_qos_req) {
				highest_qos_req = qos;
			}
			/*
			 * The pool isn't saturated for threads at and above this QoS,
			 * and this qos bucket has pending requests
			 */
			if (scheduled_count_till_qos < wq_cooperative_queue_max_size(wq)) {
				if (qos > highest_qos_req_with_width) {
					highest_qos_req_with_width = qos;
				}
			}

			/*
			 * There are no threads scheduled for this bucket but there
			 * is work pending, give it at least 1 thread
			 */
			if (scheduled_count_for_bucket == 0) {
				if (qos > highest_qos_with_no_scheduled) {
					highest_qos_with_no_scheduled = qos;
				}
			}
		}
	}

	wq->wq_cooperative_queue_best_req_qos = MAX(highest_qos_with_no_scheduled, highest_qos_req_with_width);
	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
		wq->wq_cooperative_queue_best_req_qos = highest_qos_req;
	}

#if DEBUG || DEVELOPMENT
	/*
	 * Assert that if the next best req shows up as UN, there is in fact
	 * no thread request in any of the cooperative pool buckets.
	 */
	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
		assert(!workq_has_cooperative_thread_requests(wq));
	}
#endif

	return old_best_req_qos != wq->wq_cooperative_queue_best_req_qos;
}
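
/*
 * Worked example for the refresh above (illustrative numbers, assuming a
 * pool maximum of 4 from wq_cooperative_queue_max_size()):
 *
 *	pending requests in the UI and UT buckets;
 *	scheduled counts: UI = 3, IN = 1, UT = 0
 *
 *	- rule 1a: at UI, 3 < 4, so UI qualifies; at UT, 3 + 1 + 0 = 4 is not
 *	  < 4, so UT does not -> highest_qos_req_with_width = UI
 *	- rule 1b: UT has pending requests but zero scheduled threads
 *	  -> highest_qos_with_no_scheduled = UT
 *	- result: MAX(UI, UT) = UI; rule 2 is only consulted when both are UN.
 */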

/*
 * Returns whether the input thread (or creator thread, if uth is NULL) should
 * be allowed to work as part of the cooperative pool for the <input qos>
 * bucket.
 *
 * This function is called in the following cases:
 * a) Quantum expires for a thread and it is part of the cooperative pool
 * b) When trying to pick a thread request for the creator thread to
 * represent.
 * c) When a thread is trying to pick a thread request to actually bind to
 * and service.
 *
 * Called with workq lock held.
 */

#define WQ_COOPERATIVE_POOL_UNSATURATED 1
#define WQ_COOPERATIVE_BUCKET_UNSERVICED 2
#define WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS 3

static bool
workq_cooperative_allowance(struct workqueue *wq, thread_qos_t qos, struct uthread *uth,
    bool may_start_timer)
{
	workq_lock_held(wq);

	bool exclude_thread_as_scheduled = false;
	bool passed_admissions = false;
	uint8_t bucket = _wq_bucket(qos);

	if (uth && workq_thread_is_cooperative(uth)) {
		exclude_thread_as_scheduled = true;
		_wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
	}

	/*
	 * We have not saturated the pool yet, let this thread continue
	 */
	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
	if (total_cooperative_threads < wq_cooperative_queue_max_size(wq)) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_POOL_UNSATURATED);
		goto out;
	}

	/*
	 * Without this thread, nothing is servicing the bucket which has pending
	 * work
	 */
	uint64_t bucket_scheduled = wq->wq_cooperative_queue_scheduled_count[bucket];
	if (bucket_scheduled == 0 &&
	    !STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_BUCKET_UNSERVICED);
		goto out;
	}

	/*
	 * If the number of threads scheduled at QoS buckets >= the input QoS
	 * exceeds the maximum we want for the pool, deny this thread
	 */
	uint64_t aggregate_down_to_qos = workq_num_cooperative_threads_scheduled_to_qos(wq, qos);
	passed_admissions = (aggregate_down_to_qos < wq_cooperative_queue_max_size(wq));
	WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE, aggregate_down_to_qos,
	    qos, passed_admissions, WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS);

	if (!passed_admissions && may_start_timer) {
		workq_schedule_delayed_thread_creation(wq, 0);
	}

out:
	if (exclude_thread_as_scheduled) {
		_wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
	}
	return passed_admissions;
}
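
/*
 * Admission summary (as implemented above): a thread is admitted if the pool
 * is not yet at wq_cooperative_queue_max_size(), or if it would be the sole
 * servicer of a bucket with pending work; otherwise it is admitted only when
 * the scheduled count at or above its QoS is still below the pool maximum.
 * When the thread being evaluated is already cooperative, its own count is
 * excluded for the duration of the check so it does not bias the admission
 * decision against itself.
 */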

/*
 * returns true if the best request for the pool changed as a result of
 * enqueuing this thread request.
 */
static bool
workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req)
{
	assert(req->tr_state == WORKQ_TR_STATE_NEW);

	req->tr_state = WORKQ_TR_STATE_QUEUED;
	wq->wq_reqcount += req->tr_count;

	if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
		assert(wq->wq_event_manager_threadreq == NULL);
		assert(req->tr_flags & WORKQ_TR_FLAG_KEVENT);
		assert(req->tr_count == 1);
		wq->wq_event_manager_threadreq = req;
		return true;
	}

	if (workq_threadreq_is_cooperative(req)) {
		assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
		assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);

		struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
		STAILQ_INSERT_TAIL(bucket, req, tr_link);

		return _wq_cooperative_queue_refresh_best_req_qos(wq);
	}

	struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);

	priority_queue_entry_set_sched_pri(q, &req->tr_entry,
	    workq_priority_for_req(req), false);

	if (priority_queue_insert(q, &req->tr_entry)) {
		if (workq_threadreq_is_nonovercommit(req)) {
			_wq_thactive_refresh_best_constrained_req_qos(wq);
		}
		return true;
	}
	return false;
}
1623*94d3b452SApple OSS Distributions
1624*94d3b452SApple OSS Distributions /*
1625*94d3b452SApple OSS Distributions * returns true if one of the following is true (so as to update creator if
1626*94d3b452SApple OSS Distributions * needed):
1627*94d3b452SApple OSS Distributions *
1628*94d3b452SApple OSS Distributions * (a) the next highest request of the pool we dequeued the request from changed
1629*94d3b452SApple OSS Distributions * (b) the next highest requests of the pool the current thread used to be a
1630*94d3b452SApple OSS Distributions * part of, changed
1631*94d3b452SApple OSS Distributions *
1632*94d3b452SApple OSS Distributions * For overcommit, special and constrained pools, the next highest QoS for each
1633*94d3b452SApple OSS Distributions * pool just a MAX of pending requests so tracking (a) is sufficient.
1634*94d3b452SApple OSS Distributions *
1635*94d3b452SApple OSS Distributions * But for cooperative thread pool, the next highest QoS for the pool depends on
1636*94d3b452SApple OSS Distributions * schedule counts in the pool as well. So if the current thread used to be
1637*94d3b452SApple OSS Distributions * cooperative in it's previous logical run ie (b), then that can also affect
1638*94d3b452SApple OSS Distributions * cooperative pool's next best QoS requests.
1639*94d3b452SApple OSS Distributions */
static bool
workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req,
    bool cooperative_sched_count_changed)
{
	wq->wq_reqcount--;

	bool next_highest_request_changed = false;

	if (--req->tr_count == 0) {
		if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
			assert(wq->wq_event_manager_threadreq == req);
			assert(req->tr_count == 0);
			wq->wq_event_manager_threadreq = NULL;

			/* If a cooperative thread was the one which picked up the manager
			 * thread request, we need to reevaluate the cooperative pool
			 * anyway.
			 */
			if (cooperative_sched_count_changed) {
				_wq_cooperative_queue_refresh_best_req_qos(wq);
			}
			return true;
		}

		if (workq_threadreq_is_cooperative(req)) {
			assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
			assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
			/* Account for the fact that BG and MT are coalesced when
			 * calculating the best request for the cooperative pool.
			 */
			assert(_wq_bucket(req->tr_qos) == _wq_bucket(wq->wq_cooperative_queue_best_req_qos));

			struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
			__assert_only workq_threadreq_t head = STAILQ_FIRST(bucket);

			assert(head == req);
			STAILQ_REMOVE_HEAD(bucket, tr_link);

			/*
			 * If the request we're dequeueing is cooperative, then the sched
			 * counts definitely changed.
			 */
			assert(cooperative_sched_count_changed);
		}

		/*
		 * We want to do the cooperative pool refresh after dequeueing a
		 * cooperative thread request, if any, to combine both effects into
		 * a single refresh operation.
		 */
		if (cooperative_sched_count_changed) {
			next_highest_request_changed = _wq_cooperative_queue_refresh_best_req_qos(wq);
		}

		if (!workq_threadreq_is_cooperative(req)) {
			/*
			 * All other types of requests are enqueued in priority queues.
			 */
			if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
			    &req->tr_entry)) {
				next_highest_request_changed = true;
				if (workq_threadreq_is_nonovercommit(req)) {
					_wq_thactive_refresh_best_constrained_req_qos(wq);
				}
			}
		}
	}

	return next_highest_request_changed;
}
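
/*
 * Illustrative sketch (not part of the build): how a hypothetical caller
 * could consume workq_threadreq_dequeue()'s return value, per the comment
 * above the function. The helper example_reevaluate_creator() is invented
 * for illustration and does not exist in this file.
 */
#if 0
static void
example_dequeue_and_redrive(struct workqueue *wq, workq_threadreq_t req,
    bool was_cooperative)
{
	/* If the best pending QoS of any affected pool changed... */
	if (workq_threadreq_dequeue(wq, req, was_cooperative)) {
		/* ...the creator thread's priority may be stale; re-evaluate it. */
		example_reevaluate_creator(wq);
	}
}
#endif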

static void
workq_threadreq_destroy(proc_t p, workq_threadreq_t req)
{
	req->tr_state = WORKQ_TR_STATE_CANCELED;
	if (req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT)) {
		kqueue_threadreq_cancel(p, req);
	} else {
		zfree(workq_zone_threadreq, req);
	}
}

#pragma mark workqueue thread creation thread calls

static inline bool
workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend,
    uint32_t fail_mask)
{
	uint32_t old_flags, new_flags;

	os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, {
		if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		if (__improbable(old_flags & WQ_PROC_SUSPENDED)) {
			new_flags = old_flags | pend;
		} else {
			new_flags = old_flags | sched;
		}
	});

	return (old_flags & WQ_PROC_SUSPENDED) == 0;
}
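
/*
 * Illustrative sketch (not compiled): the prepost pattern above expressed
 * with C11 atomics instead of XNU's os_atomic_rmw_loop() macro. While the
 * process is suspended we only record the "pended" bit; otherwise we claim
 * the "scheduled" bit. The EX_* flag values are placeholders, not the
 * kernel's.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define EX_EXITING    0x1u
#define EX_SUSPENDED  0x2u

static bool
example_prepost(_Atomic uint32_t *flags, uint32_t sched, uint32_t pend,
    uint32_t fail_mask)
{
	uint32_t old = atomic_load_explicit(flags, memory_order_relaxed);
	uint32_t new;

	do {
		if (old & (EX_EXITING | sched | pend | fail_mask)) {
			/* Already scheduled/pended, or the workqueue is exiting. */
			return false;
		}
		new = old | ((old & EX_SUSPENDED) ? pend : sched);
	} while (!atomic_compare_exchange_weak_explicit(flags, &old, new,
	    memory_order_acquire, memory_order_relaxed));

	/* The caller may proceed only if it won the "scheduled" bit. */
	return (old & EX_SUSPENDED) == 0;
}
#endif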

#define WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART 0x1

static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags)
{
	assert(!preemption_enabled());

	if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED,
	    WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
	    WQ_IMMEDIATE_CALL_SCHEDULED)) {
		return false;
	}

	uint64_t now = mach_absolute_time();

	if (flags & WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART) {
		/* do not change the window */
	} else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
		wq->wq_timer_interval *= 2;
		if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
		}
	} else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
		wq->wq_timer_interval /= 2;
		if (wq->wq_timer_interval < wq_stalled_window.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
		}
	}

	WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
	    _wq_flags(wq), wq->wq_timer_interval);

	thread_call_t call = wq->wq_delayed_call;
	uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED;
	uint64_t deadline = now + wq->wq_timer_interval;
	if (thread_call_enter1_delayed(call, (void *)arg, deadline)) {
		panic("delayed_call was already enqueued");
	}
	return true;
}
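
/*
 * Illustrative sketch (not compiled): the adaptive back-off used above in
 * isolation. If the thread call last ran within one interval, the window
 * doubles (capped at a maximum); if it has been quiet for more than two
 * intervals, the window halves (floored at the stalled window). All
 * parameters are in abstime units; names are placeholders.
 */
#if 0
#include <stdint.h>

static uint64_t
example_adjust_window(uint64_t now, uint64_t last_run, uint64_t interval,
    uint64_t floor_abs, uint64_t max_abs)
{
	if (now - last_run <= interval) {
		interval *= 2;          /* firing too often: back off */
		if (interval > max_abs) {
			interval = max_abs;
		}
	} else if (now - last_run > 2 * interval) {
		interval /= 2;          /* firing rarely: tighten the window */
		if (interval < floor_abs) {
			interval = floor_abs;
		}
	}
	return interval;
}
#endif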

static void
workq_schedule_immediate_thread_creation(struct workqueue *wq)
{
	assert(!preemption_enabled());

	if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED,
	    WQ_IMMEDIATE_CALL_PENDED, 0)) {
		WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
		    _wq_flags(wq), 0);

		uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED;
		if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) {
			panic("immediate_call was already enqueued");
		}
	}
}

void
workq_proc_suspended(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);

	if (wq) {
		os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
	}
}

void
workq_proc_resumed(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t wq_flags;

	if (!wq) {
		return;
	}

	wq_flags = os_atomic_andnot_orig(&wq->wq_flags, WQ_PROC_SUSPENDED |
	    WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED, relaxed);
	if ((wq_flags & WQ_EXITING) == 0) {
		disable_preemption();
		if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) {
			workq_schedule_immediate_thread_creation(wq);
		} else if (wq_flags & WQ_DELAYED_CALL_PENDED) {
			workq_schedule_delayed_thread_creation(wq,
			    WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
		}
		enable_preemption();
	}
}

/**
 * returns whether lastblocked_tsp is within wq_stalled_window usecs of now
 */
static bool
workq_thread_is_busy(uint64_t now, _Atomic uint64_t *lastblocked_tsp)
{
	uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed);
	if (now <= lastblocked_ts) {
		/*
		 * Because the update of the timestamp when a thread blocks
		 * isn't serialized against us looking at it (i.e. we don't hold
		 * the workq lock), it's possible to have a timestamp that matches
		 * the current time or that even looks to be in the future relative
		 * to when we grabbed the current time...
		 *
		 * Just treat this as a busy thread since it must have just blocked.
		 */
		return true;
	}
	return (now - lastblocked_ts) < wq_stalled_window.abstime;
}
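
/*
 * Illustrative sketch (not compiled): the stall check above in standalone
 * form. A timestamp at or ahead of `now` means the thread blocked after we
 * sampled the clock, so it is conservatively treated as busy rather than
 * retrying the racy read.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool
example_is_busy(uint64_t now, _Atomic uint64_t *last_blocked,
    uint64_t stalled_window_abs)
{
	uint64_t ts = atomic_load_explicit(last_blocked, memory_order_relaxed);

	if (now <= ts) {
		return true;    /* read landed "in the future": just blocked */
	}
	return (now - ts) < stalled_window_abs;
}
#endif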

static void
workq_add_new_threads_call(void *_p, void *flags)
{
	proc_t p = _p;
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t my_flag = (uint32_t)(uintptr_t)flags;

	/*
	 * workq_exit() will set the workqueue to NULL before
	 * it cancels thread calls.
	 */
	if (!wq) {
		return;
	}

	assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) ||
	    (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq),
	    wq->wq_nthreads, wq->wq_thidlecount);

	workq_lock_spin(wq);

	wq->wq_thread_call_last_run = mach_absolute_time();
	os_atomic_andnot(&wq->wq_flags, my_flag, release);

	/* This can drop the workqueue lock, and take it again */
	workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);

	workq_unlock(wq);

	WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0,
	    wq->wq_nthreads, wq->wq_thidlecount);
}

#pragma mark thread state tracking

static void
workq_sched_callback(int type, thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	struct uthread *uth = get_bsdthread_info(thread);
	struct workqueue *wq = proc_get_wqptr(tro->tro_proc);
	thread_qos_t req_qos, qos = uth->uu_workq_pri.qos_bucket;
	wq_thactive_t old_thactive;
	bool start_timer = false;

	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		return;
	}

	switch (type) {
	case SCHED_CALL_BLOCK:
		old_thactive = _wq_thactive_dec(wq, qos);
		req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);

		/*
		 * Remember the timestamp of the last thread that blocked in this
		 * bucket; it is used by admission checks to ignore one thread
		 * being inactive if this timestamp is recent enough.
		 *
		 * If we collide with another thread trying to update the
		 * last_blocked (really unlikely since another thread would have to
		 * get scheduled and then block after we start down this path), it's
		 * not a problem. Either timestamp is adequate, so there is no need
		 * to retry.
		 */
		os_atomic_store_wide(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
		    thread_last_run_time(thread), relaxed);

		if (req_qos == THREAD_QOS_UNSPECIFIED) {
			/*
			 * No pending request at the moment we could unblock, move on.
			 */
		} else if (qos < req_qos) {
			/*
			 * The blocking thread is at a lower QoS than the highest currently
			 * pending constrained request, nothing has to be redriven.
			 */
		} else {
			uint32_t max_busycount, old_req_count;
			old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
			    req_qos, NULL, &max_busycount);
			/*
			 * If it is possible that may_start_constrained_thread had refused
			 * admission due to being over the max concurrency, we may need to
			 * spin up a new thread.
			 *
			 * We take into account the maximum number of busy threads
			 * that can affect may_start_constrained_thread, as looking at the
			 * actual number may_start_constrained_thread will see is racy.
			 *
			 * IOW at NCPU = 4, for IN (req_qos = 1), if the old req count is
			 * between NCPU (4) and NCPU - 2 (2) we need to redrive.
			 */
			uint32_t conc = wq_max_parallelism[_wq_bucket(qos)];
			if (old_req_count <= conc && conc <= old_req_count + max_busycount) {
				start_timer = workq_schedule_delayed_thread_creation(wq, 0);
			}
		}
		if (__improbable(kdebug_enable)) {
			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
			    old_thactive, qos, NULL, NULL);
			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
			    old - 1, qos | (req_qos << 8),
			    wq->wq_reqcount << 1 | start_timer);
		}
		break;

	case SCHED_CALL_UNBLOCK:
		/*
		 * We cannot take the workqueue_lock here...
		 * an UNBLOCK can occur from a timer event which
		 * is run from an interrupt context... if the workqueue_lock
		 * is already held by this processor, we'll deadlock...
		 * the thread lock for the thread being UNBLOCKED
		 * is also held.
		 */
		old_thactive = _wq_thactive_inc(wq, qos);
		if (__improbable(kdebug_enable)) {
			__unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
			    old_thactive, qos, NULL, NULL);
			req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
			WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
			    old + 1, qos | (req_qos << 8),
			    wq->wq_threads_scheduled);
		}
		break;
	}
}
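
/*
 * Illustrative sketch (not compiled): the redrive window from the
 * SCHED_CALL_BLOCK case with the concrete numbers from the comment above.
 * With NCPU = 4 and max_busycount = 2, any old request count in [2, 4]
 * could have been refused admission purely because of threads that have
 * since blocked, so the delayed thread-creation timer must be redriven.
 */
#if 0
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static bool
example_needs_redrive(uint32_t old_req_count, uint32_t conc,
    uint32_t max_busycount)
{
	return old_req_count <= conc && conc <= old_req_count + max_busycount;
}

static void
example(void)
{
	/* NCPU = 4, best constrained request at IN, two possibly-busy threads. */
	assert(example_needs_redrive(2, 4, 2));         /* 2 <= 4 <= 4 */
	assert(example_needs_redrive(4, 4, 2));         /* 4 <= 4 <= 6 */
	assert(!example_needs_redrive(1, 4, 2));        /* 4 > 1 + 2   */
	assert(!example_needs_redrive(5, 4, 2));        /* 5 > 4       */
}
#endif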

#pragma mark workq lifecycle

void
workq_reference(struct workqueue *wq)
{
	os_ref_retain(&wq->wq_refcnt);
}

static void
workq_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct workqueue *wq;
	struct turnstile *ts;

	wq = mpsc_queue_element(e, struct workqueue, wq_destroy_link);
	assert(dq == &workq_deallocate_queue);

	turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts, TURNSTILE_WORKQS);
	assert(ts);
	turnstile_cleanup();
	turnstile_deallocate(ts);

	lck_ticket_destroy(&wq->wq_lock, &workq_lck_grp);
	zfree(workq_zone_workqueue, wq);
}

static void
workq_deallocate(struct workqueue *wq)
{
	if (os_ref_release_relaxed(&wq->wq_refcnt) == 0) {
		workq_deallocate_queue_invoke(&wq->wq_destroy_link,
		    &workq_deallocate_queue);
	}
}

void
workq_deallocate_safe(struct workqueue *wq)
{
	if (__improbable(os_ref_release_relaxed(&wq->wq_refcnt) == 0)) {
		mpsc_daemon_enqueue(&workq_deallocate_queue, &wq->wq_destroy_link,
		    MPSC_QUEUE_DISABLE_PREEMPTION);
	}
}
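
/*
 * Illustrative sketch (not compiled): choosing between the two release
 * paths above. The "safe" variant defers the turnstile/zone teardown done
 * by workq_deallocate_queue_invoke() to the deallocate daemon queue, for
 * callers in contexts where that teardown cannot run inline.
 */
#if 0
static void
example_release(struct workqueue *wq, bool teardown_ok_here)
{
	if (teardown_ok_here) {
		workq_deallocate(wq);           /* destroys inline on last ref */
	} else {
		workq_deallocate_safe(wq);      /* punts destruction to a queue */
	}
}
#endif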

/**
 * Set up per-process state for the workqueue.
 */
int
workq_open(struct proc *p, __unused struct workq_open_args *uap,
    __unused int32_t *retval)
{
	struct workqueue *wq;
	int error = 0;

	if ((p->p_lflag & P_LREGISTER) == 0) {
		return EINVAL;
	}

	if (wq_init_constrained_limit) {
		uint32_t limit, num_cpus = ml_wait_max_cpus();

		/*
		 * Set up the limit for the constrained pool.
		 * This is a virtual pool in that we don't
		 * maintain it on a separate idle and run list.
		 */
		limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR;

		if (limit > wq_max_constrained_threads) {
			wq_max_constrained_threads = limit;
		}

		if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
			wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
		}
		if (wq_max_threads > CONFIG_THREAD_MAX - 20) {
			wq_max_threads = CONFIG_THREAD_MAX - 20;
		}

		wq_death_max_load = (uint16_t)fls(num_cpus) + 1;

		for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) {
			wq_max_parallelism[_wq_bucket(qos)] =
			    qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
		}

		wq_max_cooperative_threads = num_cpus;

		wq_init_constrained_limit = 0;
	}

	if (proc_get_wqptr(p) == NULL) {
		if (proc_init_wqptr_or_wait(p) == FALSE) {
			assert(proc_get_wqptr(p) != NULL);
			goto out;
		}

		wq = zalloc_flags(workq_zone_workqueue, Z_WAITOK | Z_ZERO);

		os_ref_init_count(&wq->wq_refcnt, &workq_refgrp, 1);

		// Start the event manager at the priority hinted at by the policy engine
		thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
		pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
		wq->wq_event_manager_priority = (uint32_t)pp;
		wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
		wq->wq_proc = p;
		turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
		    TURNSTILE_WORKQS);

		TAILQ_INIT(&wq->wq_thrunlist);
		TAILQ_INIT(&wq->wq_thnewlist);
		TAILQ_INIT(&wq->wq_thidlelist);
		priority_queue_init(&wq->wq_overcommit_queue);
		priority_queue_init(&wq->wq_constrained_queue);
		priority_queue_init(&wq->wq_special_queue);
		for (int bucket = 0; bucket < WORKQ_NUM_QOS_BUCKETS; bucket++) {
			STAILQ_INIT(&wq->wq_cooperative_queue[bucket]);
		}

		/* We are only using the delayed thread call for the constrained pool,
		 * which can't have work at >= UI QoS, so we can be fine with a
		 * UI QoS thread call.
		 */
		wq->wq_delayed_call = thread_call_allocate_with_qos(
			workq_add_new_threads_call, p, THREAD_QOS_USER_INTERACTIVE,
			THREAD_CALL_OPTIONS_ONCE);
		wq->wq_immediate_call = thread_call_allocate_with_options(
			workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
			THREAD_CALL_OPTIONS_ONCE);
		wq->wq_death_call = thread_call_allocate_with_options(
			workq_kill_old_threads_call, wq,
			THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);

		lck_ticket_init(&wq->wq_lock, &workq_lck_grp);

		WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
		    VM_KERNEL_ADDRHIDE(wq), 0, 0);
		proc_set_wqptr(p, wq);
	}
out:

	return error;
}

/*
 * Routine: workq_mark_exiting
 *
 * Function: Mark the work queue such that new threads will not be added to the
 * work queue after we return.
 *
 * Conditions: Called against the current process.
 */
void
workq_mark_exiting(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t wq_flags;
	workq_threadreq_t mgr_req;

	if (!wq) {
		return;
	}

	WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0);

	workq_lock_spin(wq);

	wq_flags = os_atomic_or_orig(&wq->wq_flags, WQ_EXITING, relaxed);
	if (__improbable(wq_flags & WQ_EXITING)) {
		panic("workq_mark_exiting called twice");
	}

	/*
	 * Opportunistically try to cancel thread calls that are likely in flight.
	 * workq_exit() will do the proper cleanup.
	 */
	if (wq_flags & WQ_IMMEDIATE_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_immediate_call);
	}
	if (wq_flags & WQ_DELAYED_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_delayed_call);
	}
	if (wq_flags & WQ_DEATH_CALL_SCHEDULED) {
		thread_call_cancel(wq->wq_death_call);
	}

	mgr_req = wq->wq_event_manager_threadreq;
	wq->wq_event_manager_threadreq = NULL;
	wq->wq_reqcount = 0; /* workq_schedule_creator must not look at queues */
	wq->wq_creator = NULL;
	workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);

	workq_unlock(wq);

	if (mgr_req) {
		kqueue_threadreq_cancel(p, mgr_req);
	}
	/*
	 * No one touches the priority queues once WQ_EXITING is set.
	 * It is hence safe to do the tear down without holding any lock.
	 */
	priority_queue_destroy(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});
	priority_queue_destroy(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});
	priority_queue_destroy(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
		workq_threadreq_destroy(p, e);
	});

	WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0);
}

/*
 * Routine: workq_exit
 *
 * Function: Clean up the work queue structure(s) now that there are no threads
 * left running inside the work queue (except possibly current_thread).
 *
 * Conditions: Called by the last thread in the process.
 * Called against the current process.
 */
void
workq_exit(struct proc *p)
{
	struct workqueue *wq;
	struct uthread *uth, *tmp;

	wq = os_atomic_xchg(&p->p_wqptr, NULL, relaxed);
	if (wq != NULL) {
		thread_t th = current_thread();

		WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0);

		if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
			/*
			 * <rdar://problem/40111515> Make sure we will no longer call the
			 * sched call, if we ever block this thread, which the cancel_wait
			 * below can do.
			 */
			thread_sched_call(th, NULL);
		}

		/*
		 * Thread calls are always scheduled by the proc itself or under the
		 * workqueue spinlock if WQ_EXITING is not yet set.
		 *
		 * Either way, when this runs, the proc has no threads left beside
		 * the one running this very code, so we know no thread call can be
		 * dispatched anymore.
		 */
		thread_call_cancel_wait(wq->wq_delayed_call);
		thread_call_cancel_wait(wq->wq_immediate_call);
		thread_call_cancel_wait(wq->wq_death_call);
		thread_call_free(wq->wq_delayed_call);
		thread_call_free(wq->wq_immediate_call);
		thread_call_free(wq->wq_death_call);

		/*
		 * Clean up workqueue data structures for threads that exited and
		 * didn't get a chance to clean up after themselves.
		 *
		 * Idle/new threads should have been interrupted and died on their own.
		 */
		TAILQ_FOREACH_SAFE(uth, &wq->wq_thrunlist, uu_workq_entry, tmp) {
			thread_t mth = get_machthread(uth);
			thread_sched_call(mth, NULL);
			thread_deallocate(mth);
		}
		assert(TAILQ_EMPTY(&wq->wq_thnewlist));
		assert(TAILQ_EMPTY(&wq->wq_thidlelist));

		WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq,
		    VM_KERNEL_ADDRHIDE(wq), 0, 0);

		workq_deallocate(wq);

		WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0);
	}
}


#pragma mark bsd thread control

bool
bsdthread_part_of_cooperative_workqueue(struct uthread *uth)
{
	return (workq_thread_is_cooperative(uth) || workq_thread_is_nonovercommit(uth)) &&
	       (uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER);
}

static bool
_pthread_priority_to_policy(pthread_priority_t priority,
    thread_qos_policy_data_t *data)
{
	data->qos_tier = _pthread_priority_thread_qos(priority);
	data->tier_importance = _pthread_priority_relpri(priority);
	if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 ||
	    data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
		return false;
	}
	return true;
}
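
/*
 * Illustrative sketch (not compiled): feeding a userspace-supplied
 * pthread_priority_t through the validator above, as bsdthread_set_self()
 * does below. Values decoding to an unspecified tier or an out-of-range
 * relative priority are rejected before any thread policy is applied.
 */
#if 0
static int
example_validate(pthread_priority_t priority)
{
	thread_qos_policy_data_t policy;

	if (!_pthread_priority_to_policy(priority, &policy)) {
		return EINVAL;  /* unspecified QoS or relpri out of range */
	}
	/* policy.qos_tier / policy.tier_importance are now safe to apply. */
	return 0;
}
#endif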
2298*94d3b452SApple OSS Distributions
2299*94d3b452SApple OSS Distributions static int
bsdthread_set_self(proc_t p,thread_t th,pthread_priority_t priority,mach_port_name_t voucher,enum workq_set_self_flags flags)2300*94d3b452SApple OSS Distributions bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority,
2301*94d3b452SApple OSS Distributions mach_port_name_t voucher, enum workq_set_self_flags flags)
2302*94d3b452SApple OSS Distributions {
2303*94d3b452SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
2304*94d3b452SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2305*94d3b452SApple OSS Distributions
2306*94d3b452SApple OSS Distributions kern_return_t kr;
2307*94d3b452SApple OSS Distributions int unbind_rv = 0, qos_rv = 0, voucher_rv = 0, fixedpri_rv = 0;
2308*94d3b452SApple OSS Distributions bool is_wq_thread = (thread_get_tag(th) & THREAD_TAG_WORKQUEUE);
2309*94d3b452SApple OSS Distributions
2310*94d3b452SApple OSS Distributions assert(th == current_thread());
2311*94d3b452SApple OSS Distributions if (flags & WORKQ_SET_SELF_WQ_KEVENT_UNBIND) {
2312*94d3b452SApple OSS Distributions if (!is_wq_thread) {
2313*94d3b452SApple OSS Distributions unbind_rv = EINVAL;
2314*94d3b452SApple OSS Distributions goto qos;
2315*94d3b452SApple OSS Distributions }
2316*94d3b452SApple OSS Distributions
2317*94d3b452SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
2318*94d3b452SApple OSS Distributions unbind_rv = EINVAL;
2319*94d3b452SApple OSS Distributions goto qos;
2320*94d3b452SApple OSS Distributions }
2321*94d3b452SApple OSS Distributions
2322*94d3b452SApple OSS Distributions workq_threadreq_t kqr = uth->uu_kqr_bound;
2323*94d3b452SApple OSS Distributions if (kqr == NULL) {
2324*94d3b452SApple OSS Distributions unbind_rv = EALREADY;
2325*94d3b452SApple OSS Distributions goto qos;
2326*94d3b452SApple OSS Distributions }
2327*94d3b452SApple OSS Distributions
2328*94d3b452SApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
2329*94d3b452SApple OSS Distributions unbind_rv = EINVAL;
2330*94d3b452SApple OSS Distributions goto qos;
2331*94d3b452SApple OSS Distributions }
2332*94d3b452SApple OSS Distributions
2333*94d3b452SApple OSS Distributions kqueue_threadreq_unbind(p, kqr);
2334*94d3b452SApple OSS Distributions }
2335*94d3b452SApple OSS Distributions
2336*94d3b452SApple OSS Distributions qos:
2337*94d3b452SApple OSS Distributions if (flags & (WORKQ_SET_SELF_QOS_FLAG | WORKQ_SET_SELF_QOS_OVERRIDE_FLAG)) {
2338*94d3b452SApple OSS Distributions assert(flags & WORKQ_SET_SELF_QOS_FLAG);
2339*94d3b452SApple OSS Distributions
2340*94d3b452SApple OSS Distributions thread_qos_policy_data_t new_policy;
2341*94d3b452SApple OSS Distributions thread_qos_t qos_override = THREAD_QOS_UNSPECIFIED;
2342*94d3b452SApple OSS Distributions
2343*94d3b452SApple OSS Distributions if (!_pthread_priority_to_policy(priority, &new_policy)) {
2344*94d3b452SApple OSS Distributions qos_rv = EINVAL;
2345*94d3b452SApple OSS Distributions goto voucher;
2346*94d3b452SApple OSS Distributions }
2347*94d3b452SApple OSS Distributions
2348*94d3b452SApple OSS Distributions if (flags & WORKQ_SET_SELF_QOS_OVERRIDE_FLAG) {
2349*94d3b452SApple OSS Distributions /*
2350*94d3b452SApple OSS Distributions * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is set, we definitely
2351*94d3b452SApple OSS Distributions * should have an override QoS in the pthread_priority_t and we should
2352*94d3b452SApple OSS Distributions * only come into this path for cooperative thread requests
2353*94d3b452SApple OSS Distributions */
2354*94d3b452SApple OSS Distributions if (!_pthread_priority_has_override_qos(priority) ||
2355*94d3b452SApple OSS Distributions !_pthread_priority_is_cooperative(priority)) {
2356*94d3b452SApple OSS Distributions qos_rv = EINVAL;
2357*94d3b452SApple OSS Distributions goto voucher;
2358*94d3b452SApple OSS Distributions }
2359*94d3b452SApple OSS Distributions qos_override = _pthread_priority_thread_override_qos(priority);
2360*94d3b452SApple OSS Distributions } else {
2361*94d3b452SApple OSS Distributions /*
2362*94d3b452SApple OSS Distributions * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is not set, we definitely
2363*94d3b452SApple OSS Distributions * should not have an override QoS in the pthread_priority_t
2364*94d3b452SApple OSS Distributions */
2365*94d3b452SApple OSS Distributions if (_pthread_priority_has_override_qos(priority)) {
2366*94d3b452SApple OSS Distributions qos_rv = EINVAL;
2367*94d3b452SApple OSS Distributions goto voucher;
2368*94d3b452SApple OSS Distributions }
2369*94d3b452SApple OSS Distributions }
2370*94d3b452SApple OSS Distributions
2371*94d3b452SApple OSS Distributions if (!is_wq_thread) {
2372*94d3b452SApple OSS Distributions /*
2373*94d3b452SApple OSS Distributions * Threads opted out of QoS can't change QoS
2374*94d3b452SApple OSS Distributions */
2375*94d3b452SApple OSS Distributions if (!thread_has_qos_policy(th)) {
2376*94d3b452SApple OSS Distributions qos_rv = EPERM;
2377*94d3b452SApple OSS Distributions goto voucher;
2378*94d3b452SApple OSS Distributions }
2379*94d3b452SApple OSS Distributions } else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER ||
2380*94d3b452SApple OSS Distributions uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_ABOVEUI) {
2381*94d3b452SApple OSS Distributions /*
2382*94d3b452SApple OSS Distributions * Workqueue manager threads or threads above UI can't change QoS
2383*94d3b452SApple OSS Distributions */
2384*94d3b452SApple OSS Distributions qos_rv = EINVAL;
2385*94d3b452SApple OSS Distributions goto voucher;
2386*94d3b452SApple OSS Distributions } else {
2387*94d3b452SApple OSS Distributions /*
2388*94d3b452SApple OSS Distributions * For workqueue threads, possibly adjust buckets and redrive thread
2389*94d3b452SApple OSS Distributions * requests.
2390*94d3b452SApple OSS Distributions *
2391*94d3b452SApple OSS Distributions * Transitions allowed:
2392*94d3b452SApple OSS Distributions *
2393*94d3b452SApple OSS Distributions * overcommit --> non-overcommit
2394*94d3b452SApple OSS Distributions * overcommit --> overcommit
2395*94d3b452SApple OSS Distributions * non-overcommit --> non-overcommit
2396*94d3b452SApple OSS Distributions * non-overcommit --> overcommit (to be deprecated later)
2397*94d3b452SApple OSS Distributions * cooperative --> cooperative
2398*94d3b452SApple OSS Distributions *
2399*94d3b452SApple OSS Distributions * All other transitions aren't allowed so reject them.
2400*94d3b452SApple OSS Distributions */
2401*94d3b452SApple OSS Distributions if (workq_thread_is_overcommit(uth) && _pthread_priority_is_cooperative(priority)) {
2402*94d3b452SApple OSS Distributions qos_rv = EINVAL;
2403*94d3b452SApple OSS Distributions goto voucher;
2404*94d3b452SApple OSS Distributions } else if (workq_thread_is_cooperative(uth) && !_pthread_priority_is_cooperative(priority)) {
2405*94d3b452SApple OSS Distributions qos_rv = EINVAL;
2406*94d3b452SApple OSS Distributions goto voucher;
2407*94d3b452SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_cooperative(priority)) {
2408*94d3b452SApple OSS Distributions qos_rv = EINVAL;
2409*94d3b452SApple OSS Distributions goto voucher;
2410*94d3b452SApple OSS Distributions }
2411*94d3b452SApple OSS Distributions
2412*94d3b452SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
2413*94d3b452SApple OSS Distributions bool force_run = false;
2414*94d3b452SApple OSS Distributions
2415*94d3b452SApple OSS Distributions if (qos_override) {
2416*94d3b452SApple OSS Distributions /*
2417*94d3b452SApple OSS Distributions * We're in the case of a thread clarifying that it is for eg. not IN
2418*94d3b452SApple OSS Distributions * req QoS but rather, UT req QoS with IN override. However, this can
2419*94d3b452SApple OSS Distributions * race with a concurrent override happening to the thread via
2420*94d3b452SApple OSS Distributions * workq_thread_add_dispatch_override so this needs to be
2421*94d3b452SApple OSS Distributions * synchronized with the thread mutex.
2422*94d3b452SApple OSS Distributions */
2423*94d3b452SApple OSS Distributions thread_mtx_lock(th);
2424*94d3b452SApple OSS Distributions }
2425*94d3b452SApple OSS Distributions
2426*94d3b452SApple OSS Distributions workq_lock_spin(wq);
2427*94d3b452SApple OSS Distributions
2428*94d3b452SApple OSS Distributions old_pri = new_pri = uth->uu_workq_pri;
2429*94d3b452SApple OSS Distributions new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;
2430*94d3b452SApple OSS Distributions
2431*94d3b452SApple OSS Distributions if (old_pri.qos_override < qos_override) {
2432*94d3b452SApple OSS Distributions /*
2433*94d3b452SApple OSS Distributions * Since this can race with a concurrent override via
2434*94d3b452SApple OSS Distributions * workq_thread_add_dispatch_override, only adjust override value if we
2435*94d3b452SApple OSS Distributions * are higher - this is a saturating function.
2436*94d3b452SApple OSS Distributions *
2437*94d3b452SApple OSS Distributions * We should not be changing the final override values, we should simply
2438*94d3b452SApple OSS Distributions * be redistributing the current value with a different breakdown of req
2439*94d3b452SApple OSS Distributions * vs override QoS - assert to that effect. Therefore, buckets should
2440*94d3b452SApple OSS Distributions * not change.
2441*94d3b452SApple OSS Distributions */
2442*94d3b452SApple OSS Distributions new_pri.qos_override = qos_override;
2443*94d3b452SApple OSS Distributions assert(workq_pri_override(new_pri) == workq_pri_override(old_pri));
2444*94d3b452SApple OSS Distributions assert(workq_pri_bucket(new_pri) == workq_pri_bucket(old_pri));
2445*94d3b452SApple OSS Distributions }
2446*94d3b452SApple OSS Distributions
2447*94d3b452SApple OSS Distributions /* Adjust schedule counts for various types of transitions */
2448*94d3b452SApple OSS Distributions
2449*94d3b452SApple OSS Distributions /* overcommit -> non-overcommit */
2450*94d3b452SApple OSS Distributions if (workq_thread_is_overcommit(uth) && _pthread_priority_is_nonovercommit(priority)) {
2451*94d3b452SApple OSS Distributions workq_thread_set_type(uth, 0);
2452*94d3b452SApple OSS Distributions wq->wq_constrained_threads_scheduled++;
2453*94d3b452SApple OSS Distributions
2454*94d3b452SApple OSS Distributions /* non-overcommit -> overcommit */
2455*94d3b452SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_overcommit(priority)) {
2456*94d3b452SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
2457*94d3b452SApple OSS Distributions force_run = (wq->wq_constrained_threads_scheduled-- == wq_max_constrained_threads);
2458*94d3b452SApple OSS Distributions
2459*94d3b452SApple OSS Distributions /* cooperative -> cooperative */
2460*94d3b452SApple OSS Distributions } else if (workq_thread_is_cooperative(uth)) {
2461*94d3b452SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, old_pri.qos_req);
2462*94d3b452SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, new_pri.qos_req);
2463*94d3b452SApple OSS Distributions
2464*94d3b452SApple OSS Distributions /* We're changing schedule counts within cooperative pool, we
2465*94d3b452SApple OSS Distributions * need to refresh best cooperative QoS logic again */
2466*94d3b452SApple OSS Distributions force_run = _wq_cooperative_queue_refresh_best_req_qos(wq);
2467*94d3b452SApple OSS Distributions }

			/*
			 * This will set up an override on the thread if any and will also call
			 * schedule_creator if needed
			 */
			workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
			workq_unlock(wq);

			if (qos_override) {
				thread_mtx_unlock(th);
			}

			if (workq_thread_is_overcommit(uth)) {
				thread_disarm_workqueue_quantum(th);
			} else {
				/* If the thread changed QoS buckets, the quantum duration
				 * may have changed too */
				thread_arm_workqueue_quantum(th);
			}
		}

		kr = thread_policy_set_internal(th, THREAD_QOS_POLICY,
		    (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			qos_rv = EINVAL;
		}
	}

voucher:
	if (flags & WORKQ_SET_SELF_VOUCHER_FLAG) {
		kr = thread_set_voucher_name(voucher);
		if (kr != KERN_SUCCESS) {
			voucher_rv = ENOENT;
			goto fixedpri;
		}
	}

fixedpri:
	if (qos_rv) {
		goto done;
	}
	if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 0};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	} else if (flags & WORKQ_SET_SELF_TIMESHARE_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 1};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	}

done:
	if (qos_rv && voucher_rv) {
		/* Both failed, give that a unique error. */
		return EBADMSG;
	}

	if (unbind_rv) {
		return unbind_rv;
	}

	if (qos_rv) {
		return qos_rv;
	}

	if (voucher_rv) {
		return voucher_rv;
	}

	if (fixedpri_rv) {
		return fixedpri_rv;
	}

	return 0;
}
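
/*
 * Illustrative note (a sketch, not used by this file): the error
 * aggregation above gives a combined QoS+voucher failure a unique errno so
 * callers can tell it apart from either single failure.  Assuming one call
 * that set both WORKQ_SET_SELF_QOS_FLAG and WORKQ_SET_SELF_VOUCHER_FLAG:
 *
 *	qos_rv = EINVAL, voucher_rv = 0      => returns EINVAL
 *	qos_rv = 0,      voucher_rv = ENOENT => returns ENOENT
 *	qos_rv = EINVAL, voucher_rv = ENOENT => returns EBADMSG
 */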

static int
bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport,
    pthread_priority_t pp, user_addr_t resource)
{
	thread_qos_t qos = _pthread_priority_thread_qos(pp);
	if (qos == THREAD_QOS_UNSPECIFIED) {
		return EINVAL;
	}

	thread_t th = port_name_to_thread(kport,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
	if (th == THREAD_NULL) {
		return ESRCH;
	}

	int rv = proc_thread_qos_add_override(proc_task(p), th, 0, qos, TRUE,
	    resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);

	thread_deallocate(th);
	return rv;
}

static int
bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport,
    user_addr_t resource)
{
	thread_t th = port_name_to_thread(kport,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
	if (th == THREAD_NULL) {
		return ESRCH;
	}

	int rv = proc_thread_qos_remove_override(proc_task(p), th, 0, resource,
	    THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);

	thread_deallocate(th);
	return rv;
}
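
/*
 * Minimal userspace sketch (assumption: the libpthread SPI
 * pthread_override_qos_class_start_np()/_end_np() from <pthread/qos.h>,
 * which reaches the add/remove entry points above through bsdthread_ctl()):
 *
 *	#include <pthread/qos.h>
 *
 *	// Boost `victim` to UI QoS while waiting on work it owns.
 *	pthread_override_t o = pthread_override_qos_class_start_np(victim,
 *	    QOS_CLASS_USER_INTERACTIVE, 0);
 *	wait_for_victim_to_finish();           // hypothetical helper
 *	pthread_override_qos_class_end_np(o);  // drops the explicit override
 */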

static int
workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport,
    pthread_priority_t pp, user_addr_t ulock_addr)
{
	struct uu_workq_policy old_pri, new_pri;
	struct workqueue *wq = proc_get_wqptr(p);

	thread_qos_t qos_override = _pthread_priority_thread_qos(pp);
	if (qos_override == THREAD_QOS_UNSPECIFIED) {
		return EINVAL;
	}

	thread_t thread = port_name_to_thread(kport,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
	if (thread == THREAD_NULL) {
		return ESRCH;
	}

	struct uthread *uth = get_bsdthread_info(thread);
	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
		thread_deallocate(thread);
		return EPERM;
	}

	WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE,
	    wq, thread_tid(thread), 1, pp);

	thread_mtx_lock(thread);

	if (ulock_addr) {
		uint32_t val;
		int rc;
		/*
		 * Work around the lack of explicit support for 'no-fault copyin'
		 * <rdar://problem/24999882>: with preemption disabled, the copyin
		 * fails instead of paging in.
		 */
		disable_preemption();
		rc = copyin_atomic32(ulock_addr, &val);
		enable_preemption();
		if (rc == 0 && ulock_owner_value_to_port_name(val) != kport) {
			goto out;
		}
	}

	workq_lock_spin(wq);

	old_pri = uth->uu_workq_pri;
	if (old_pri.qos_override >= qos_override) {
		/* Nothing to do */
	} else if (thread == current_thread()) {
		new_pri = old_pri;
		new_pri.qos_override = qos_override;
		workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	} else {
		uth->uu_workq_pri.qos_override = qos_override;
		if (qos_override > workq_pri_override(old_pri)) {
			thread_set_workq_override(thread, qos_override);
		}
	}

	workq_unlock(wq);

out:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return 0;
}
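
/*
 * Illustrative note on the ulock check above: dispatch passes the address of
 * the queue's drainer ulock, and the override is only applied while `kport`
 * still owns that lock.  If the copyin succeeds but the owner's port name no
 * longer matches `kport`, the drainer has already moved on, so the request
 * is silently dropped; returning 0 keeps that race benign.
 */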

static int
workq_thread_reset_dispatch_override(proc_t p, thread_t thread)
{
	struct uu_workq_policy old_pri, new_pri;
	struct workqueue *wq = proc_get_wqptr(p);
	struct uthread *uth = get_bsdthread_info(thread);

	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
		return EPERM;
	}

	WQ_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_NONE, wq, 0, 0, 0);

	/*
	 * workq_thread_add_dispatch_override takes the thread mutex before doing
	 * the copyin to validate the drainer and apply the override. We need to
	 * do the same here. See rdar://84472518
	 */
	thread_mtx_lock(thread);

	workq_lock_spin(wq);
	old_pri = new_pri = uth->uu_workq_pri;
	new_pri.qos_override = THREAD_QOS_UNSPECIFIED;
	workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	workq_unlock(wq);

	thread_mtx_unlock(thread);
	return 0;
}

static int
workq_thread_allow_kill(__unused proc_t p, thread_t thread, bool enable)
{
	if (!(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE)) {
		// If the thread isn't a workqueue thread, don't set the
		// kill_allowed bit; however, we still need to return 0
		// instead of an error code, since this code executes on
		// the abort path, which must not depend on the pthread_t
		// (returning an error depends on the pthread_t via
		// cerror_nocancel).
		return 0;
	}
	struct uthread *uth = get_bsdthread_info(thread);
	uth->uu_workq_pthread_kill_allowed = enable;
	return 0;
}

static int
workq_allow_sigmask(proc_t p, sigset_t mask)
{
	if (mask & workq_threadmask) {
		return EINVAL;
	}

	proc_lock(p);
	p->p_workq_allow_sigmask |= mask;
	proc_unlock(p);

	return 0;
}

static int
bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags,
    int *retval)
{
	static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
	    _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
	static_assert(QOS_PARALLELISM_REALTIME ==
	    _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
	static_assert(QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE ==
	    _PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC, "cluster shared resource");

	if (flags & ~(QOS_PARALLELISM_REALTIME |
	    QOS_PARALLELISM_COUNT_LOGICAL |
	    QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE)) {
		return EINVAL;
	}

	/* No cluster shared resource units are present */
	if (flags & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) {
		return ENOTSUP;
	}

	if (flags & QOS_PARALLELISM_REALTIME) {
		if (qos) {
			return EINVAL;
		}
	} else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
		return EINVAL;
	}

	*retval = qos_max_parallelism(qos, flags);
	return 0;
}
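
/*
 * Userspace-facing sketch (assumption: the <pthread/qos.h> SPI
 * pthread_qos_max_parallelism(), which reaches this handler through
 * BSDTHREAD_CTL_QOS_MAX_PARALLELISM; names outside this file are assumed):
 *
 *	#include <pthread/qos.h>
 *
 *	// How many UI-QoS threads can usefully run in parallel?
 *	int width = pthread_qos_max_parallelism(QOS_CLASS_USER_INTERACTIVE,
 *	    PTHREAD_MAX_PARALLELISM_PHYSICAL);
 *	// A negative result means the kernel rejected the QoS/flags (EINVAL).
 */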

static int
bsdthread_dispatch_apply_attr(__unused struct proc *p, thread_t thread,
    unsigned long flags, uint64_t value1, __unused uint64_t value2)
{
	uint32_t apply_worker_index;
	kern_return_t kr;

	switch (flags) {
	case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET:
		apply_worker_index = (uint32_t)value1;
		kr = thread_shared_rsrc_policy_set(thread, apply_worker_index,
		    CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
		/*
		 * KERN_INVALID_POLICY indicates that the thread was trying to bind to a
		 * cluster which it was not eligible to execute on.
		 */
		return (kr == KERN_SUCCESS) ? 0 :
		       ((kr == KERN_INVALID_POLICY) ? ENOTSUP : EINVAL);
	case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR:
		kr = thread_shared_rsrc_policy_clear(thread,
		    CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
		return (kr == KERN_SUCCESS) ? 0 : EINVAL;
	default:
		return EINVAL;
	}
}

#define ENSURE_UNUSED(arg) \
	({ if ((arg) != 0) { return EINVAL; } })
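
/*
 * Note: ENSURE_UNUSED is a statement expression whose embedded `return`
 * exits the *enclosing function*.  For example, inside bsdthread_ctl():
 *
 *	ENSURE_UNUSED(uap->arg3);
 *	// behaves like:
 *	if (uap->arg3 != 0) {
 *		return EINVAL;
 *	}
 */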

int
bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
{
	switch (uap->cmd) {
	case BSDTHREAD_CTL_QOS_OVERRIDE_START:
		return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1,
		    (pthread_priority_t)uap->arg2, uap->arg3);
	case BSDTHREAD_CTL_QOS_OVERRIDE_END:
		ENSURE_UNUSED(uap->arg3);
		return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1,
		    (user_addr_t)uap->arg2);

	case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
		return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1,
		    (pthread_priority_t)uap->arg2, uap->arg3);
	case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
		return workq_thread_reset_dispatch_override(p, current_thread());

	case BSDTHREAD_CTL_SET_SELF:
		return bsdthread_set_self(p, current_thread(),
		    (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
		    (enum workq_set_self_flags)uap->arg3);

	case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
		ENSURE_UNUSED(uap->arg3);
		return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1,
		    (unsigned long)uap->arg2, retval);
	case BSDTHREAD_CTL_WORKQ_ALLOW_KILL:
		ENSURE_UNUSED(uap->arg2);
		ENSURE_UNUSED(uap->arg3);
		return workq_thread_allow_kill(p, current_thread(), (bool)uap->arg1);
	case BSDTHREAD_CTL_DISPATCH_APPLY_ATTR:
		return bsdthread_dispatch_apply_attr(p, current_thread(),
		    (unsigned long)uap->arg1, (uint64_t)uap->arg2,
		    (uint64_t)uap->arg3);
	case BSDTHREAD_CTL_WORKQ_ALLOW_SIGMASK:
		return workq_allow_sigmask(p, (int)uap->arg1);
	case BSDTHREAD_CTL_SET_QOS:
	case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
	case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
		/* no longer supported */
		return ENOTSUP;

	default:
		return EINVAL;
	}
}
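
/*
 * Entry sketch (assumption: libpthread reaches bsdthread_ctl() through a
 * private __bsdthread_ctl() syscall stub; the stub name and signature are
 * assumptions here, not defined by this file):
 *
 *	// e.g. start an explicit QoS override on another thread
 *	__bsdthread_ctl(BSDTHREAD_CTL_QOS_OVERRIDE_START,
 *	    target_kport, priority, resource);
 */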

#pragma mark workqueue thread manipulation

static void __dead2
workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags);

static void __dead2
workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags);

static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2;

#if KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD
static inline uint64_t
workq_trace_req_id(workq_threadreq_t req)
{
	struct kqworkloop *kqwl;
	if (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
		kqwl = __container_of(req, struct kqworkloop, kqwl_request);
		return kqwl->kqwl_dynamicid;
	}

	return VM_KERNEL_ADDRHIDE(req);
}
#endif

/**
 * Entry point for libdispatch to ask for threads
 */
static int
workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp, bool cooperative)
{
	thread_qos_t qos = _pthread_priority_thread_qos(pp);
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;
	int ret = 0;

	if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
	    qos == THREAD_QOS_UNSPECIFIED) {
		ret = EINVAL;
		goto exit;
	}

	WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
	    wq, reqcount, pp, cooperative);

	workq_threadreq_t req = zalloc(workq_zone_threadreq);
	priority_queue_entry_init(&req->tr_entry);
	req->tr_state = WORKQ_TR_STATE_NEW;
	req->tr_qos = qos;
	workq_tr_flags_t tr_flags = 0;

	if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
		tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
		upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
	}

	if (cooperative) {
		tr_flags |= WORKQ_TR_FLAG_COOPERATIVE;
		upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;

		if (reqcount > 1) {
			ret = ENOTSUP;
			goto free_and_exit;
		}
	}

	/* A thread request cannot be both overcommit and cooperative */
	if (workq_tr_is_cooperative(tr_flags) &&
	    workq_tr_is_overcommit(tr_flags)) {
		ret = EINVAL;
		goto free_and_exit;
	}
	req->tr_flags = tr_flags;

	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
	    wq, workq_trace_req_id(req), req->tr_qos, reqcount);

	workq_lock_spin(wq);
	do {
		if (_wq_exiting(wq)) {
			goto unlock_and_exit;
		}

		/*
		 * When userspace is asking for parallelism, wake up to
		 * (reqcount - 1) threads without pacing, to inform the scheduler
		 * of that workload.
		 *
		 * The last request, or the ones that failed the admission checks,
		 * are enqueued and go through the regular creator codepath.
		 *
		 * If there aren't enough threads, add one, but re-evaluate
		 * everything as conditions may now have changed.
		 */
		unpaced = reqcount - 1;

		if (reqcount > 1) {
			/* We don't handle asking for parallelism on the cooperative
			 * workqueue just yet */
			assert(!workq_threadreq_is_cooperative(req));

			if (workq_threadreq_is_nonovercommit(req)) {
				unpaced = workq_constrained_allowance(wq, qos, NULL, false);
				if (unpaced >= reqcount - 1) {
					unpaced = reqcount - 1;
				}
			}
		}

		/*
		 * This path does not currently handle custom workloop parameters
		 * when creating threads for parallelism.
		 */
		assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));

		/*
		 * This is a trimmed down version of workq_threadreq_bind_and_unlock()
		 */
		while (unpaced > 0 && wq->wq_thidlecount) {
			struct uthread *uth;
			bool needs_wakeup;
			uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;

			if (workq_tr_is_overcommit(req->tr_flags)) {
				uu_flags |= UT_WORKQ_OVERCOMMIT;
			}

			uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);

			_wq_thactive_inc(wq, qos);
			wq->wq_thscheduled_count[_wq_bucket(qos)]++;
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
			wq->wq_fulfilled++;

			uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
			uth->uu_save.uus_workq_park_data.thread_request = req;
			if (needs_wakeup) {
				workq_thread_wakeup(uth);
			}
			unpaced--;
			reqcount--;
		}
	} while (unpaced && wq->wq_nthreads < wq_max_threads &&
	    workq_add_new_idle_thread(p, wq));

	if (_wq_exiting(wq)) {
		goto unlock_and_exit;
	}

	req->tr_count = (uint16_t)reqcount;
	if (workq_threadreq_enqueue(wq, req)) {
		/* This can drop the workqueue lock, and take it again */
		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
	}
	workq_unlock(wq);
	return 0;

unlock_and_exit:
	workq_unlock(wq);
free_and_exit:
	zfree(workq_zone_threadreq, req);
exit:
	return ret;
}
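
/*
 * Worked example of the pacing above (numbers are illustrative only):
 * a constrained request with reqcount = 4, 2 idle threads, and a
 * constrained allowance of 3 computes unpaced = 3, wakes both idle
 * threads (reqcount drops to 2), grows the pool by one idle thread and
 * loops to wake it too (reqcount drops to 1), then enqueues the request
 * with tr_count = 1 for the creator codepath to fulfill.
 */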

bool
workq_kern_threadreq_initiate(struct proc *p, workq_threadreq_t req,
    struct turnstile *workloop_ts, thread_qos_t qos,
    workq_kern_threadreq_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	struct uthread *uth = NULL;

	assert(req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT));

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
		qos = thread_workq_qos_for_pri(trp.trp_pri);
		if (qos == THREAD_QOS_UNSPECIFIED) {
			qos = WORKQ_THREAD_QOS_ABOVEUI;
		}
	}

	assert(req->tr_state == WORKQ_TR_STATE_IDLE);
	priority_queue_entry_init(&req->tr_entry);
	req->tr_count = 1;
	req->tr_state = WORKQ_TR_STATE_NEW;
	req->tr_qos = qos;

	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq,
	    workq_trace_req_id(req), qos, 1);

	if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) {
		/*
		 * We're called back synchronously from the context of
		 * kqueue_threadreq_unbind from within workq_thread_return(),
		 * so we can try to match up this thread with this request!
		 */
		uth = current_uthread();
		assert(uth->uu_kqr_bound == NULL);
	}

	workq_lock_spin(wq);
	if (_wq_exiting(wq)) {
		req->tr_state = WORKQ_TR_STATE_IDLE;
		workq_unlock(wq);
		return false;
	}

	if (uth && workq_threadreq_admissible(wq, uth, req)) {
		/*
		 * This is the rebind case: we were about to park and unbind
		 * when more events came in, so keep the binding.
		 */
		assert(uth != wq->wq_creator);

		if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
			_wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ false);
		}
		/*
		 * We're called from workq_kern_threadreq_initiate()
		 * due to an unbind, with the kq req held.
		 */
		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
		    workq_trace_req_id(req), req->tr_flags, 0);
		wq->wq_fulfilled++;

		kqueue_threadreq_bind(p, req, get_machthread(uth), 0);
	} else {
		if (workloop_ts) {
			workq_perform_turnstile_operation_locked(wq, ^{
				turnstile_update_inheritor(workloop_ts, wq->wq_turnstile,
				    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
				turnstile_update_inheritor_complete(workloop_ts,
				    TURNSTILE_INTERLOCK_HELD);
			});
		}

		bool reevaluate_creator_thread_group = false;
#if CONFIG_PREADOPT_TG
		reevaluate_creator_thread_group = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
#endif
		/* We enqueued the highest priority item or we may need to reevaluate if
		 * the creator needs a thread group pre-adoption */
		if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_thread_group) {
			workq_schedule_creator(p, wq, flags);
		}
	}

	workq_unlock(wq);

	return true;
}

void
workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
    thread_qos_t qos, workq_kern_threadreq_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	bool make_overcommit = false;

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		/* Requests outside-of-QoS shouldn't accept modify operations */
		return;
	}

	workq_lock_spin(wq);

	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
	assert(req->tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP));

	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
		kqueue_threadreq_bind(p, req, req->tr_thread, 0);
		workq_unlock(wq);
		return;
	}

	if (flags & WORKQ_THREADREQ_MAKE_OVERCOMMIT) {
		/* TODO (rokhinip): We come into this code path for kqwl thread
		 * requests. kqwl requests cannot be cooperative.
		 */
		assert(!workq_threadreq_is_cooperative(req));

		make_overcommit = workq_threadreq_is_nonovercommit(req);
	}

	if (_wq_exiting(wq) || (req->tr_qos == qos && !make_overcommit)) {
		workq_unlock(wq);
		return;
	}

	assert(req->tr_count == 1);
	if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
		panic("Invalid thread request (%p) state %d", req, req->tr_state);
	}

	WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
	    workq_trace_req_id(req), qos, 0);

	struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
	workq_threadreq_t req_max;

	/*
	 * Stage 1: Dequeue the request from its priority queue.
	 *
	 * If we dequeue the root item of the constrained priority queue,
	 * maintain the best constrained request qos invariant.
	 */
	if (priority_queue_remove(pq, &req->tr_entry)) {
		if (workq_threadreq_is_nonovercommit(req)) {
			_wq_thactive_refresh_best_constrained_req_qos(wq);
		}
	}

	/*
	 * Stage 2: Apply changes to the thread request
	 *
	 * If the item will not become the root of the priority queue it belongs to,
	 * then we need to wait in line, just enqueue and return quickly.
	 */
	if (__improbable(make_overcommit)) {
		req->tr_flags ^= WORKQ_TR_FLAG_OVERCOMMIT;
		pq = workq_priority_queue_for_req(wq, req);
	}
	req->tr_qos = qos;

	req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
	if (req_max && req_max->tr_qos >= qos) {
		priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
		    workq_priority_for_req(req), false);
		priority_queue_insert(pq, &req->tr_entry);
		workq_unlock(wq);
		return;
	}

	/*
	 * Stage 3: Reevaluate whether we should run the thread request.
	 *
	 * Pretend the thread request is new again:
	 * - adjust wq_reqcount to not count it anymore.
	 * - make its state WORKQ_TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
	 *   properly attempts a synchronous bind)
	 */
	wq->wq_reqcount--;
	req->tr_state = WORKQ_TR_STATE_NEW;

	/* We enqueued the highest priority item or we may need to reevaluate if
	 * the creator needs a thread group pre-adoption if the request got a new TG */
	bool reevaluate_creator_tg = false;

#if CONFIG_PREADOPT_TG
	reevaluate_creator_tg = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
#endif

	if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_tg) {
		workq_schedule_creator(p, wq, flags);
	}
	workq_unlock(wq);
}
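
/*
 * Worked example for the three stages above (illustrative only): a queued
 * constrained request at QoS UT being modified to IN with
 * WORKQ_THREADREQ_MAKE_OVERCOMMIT set is (1) removed from the constrained
 * priority queue, refreshing the best-constrained-QoS snapshot, (2) flipped
 * to overcommit and retargeted at the overcommit queue with tr_qos = IN,
 * and (3) either re-enqueued behind a higher-priority root item, or treated
 * as a new request that may bind synchronously or wake the creator.
 */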

void
workq_kern_threadreq_lock(struct proc *p)
{
	workq_lock_spin(proc_get_wqptr_fast(p));
}

void
workq_kern_threadreq_unlock(struct proc *p)
{
	workq_unlock(proc_get_wqptr_fast(p));
}

void
workq_kern_threadreq_update_inheritor(struct proc *p, workq_threadreq_t req,
    thread_t owner, struct turnstile *wl_ts,
    turnstile_update_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	turnstile_inheritor_t inheritor;

	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
	assert(req->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
	workq_lock_held(wq);

	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
		kqueue_threadreq_bind(p, req, req->tr_thread,
		    KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE);
		return;
	}

	if (_wq_exiting(wq)) {
		inheritor = TURNSTILE_INHERITOR_NULL;
	} else {
		if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
			panic("Invalid thread request (%p) state %d", req, req->tr_state);
		}

		if (owner) {
			inheritor = owner;
			flags |= TURNSTILE_INHERITOR_THREAD;
		} else {
			inheritor = wq->wq_turnstile;
			flags |= TURNSTILE_INHERITOR_TURNSTILE;
		}
	}

	workq_perform_turnstile_operation_locked(wq, ^{
		turnstile_update_inheritor(wl_ts, inheritor, flags);
	});
}

void
workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);
	workq_schedule_creator(p, wq, flags);
	workq_unlock(wq);
}

/*
 * Always called at AST by the thread on itself
 *
 * Upon quantum expiry, the workqueue subsystem evaluates its state and decides
 * on what the thread should do next. The TSD value is always set by the thread
 * on itself in the kernel and cleared either by userspace when it acks the TSD
 * value and takes action, or by the thread in the kernel when the quantum
 * expires again.
 */
void
workq_kern_quantum_expiry_reevaluate(proc_t proc, thread_t thread)
{
	struct uthread *uth = get_bsdthread_info(thread);

	if (uth->uu_workq_flags & UT_WORKQ_DYING) {
		return;
	}

	if (!thread_supports_cooperative_workqueue(thread)) {
		panic("Quantum expired for thread that doesn't support cooperative workqueue");
	}

	thread_qos_t qos = uth->uu_workq_pri.qos_bucket;
	if (qos == THREAD_QOS_UNSPECIFIED) {
		panic("Thread should not have workq bucket of QoS UN");
	}

	assert(thread_has_expired_workqueue_quantum(thread, false));

	struct workqueue *wq = proc_get_wqptr(proc);
	assert(wq != NULL);

	/*
	 * For starters, we're just going to evaluate and see if we need to narrow
	 * the pool and tell this thread to park if needed. In the future, we'll
	 * evaluate and convey other workqueue state information like needing to
	 * pump kevents, etc.
	 */
	uint64_t flags = 0;

	workq_lock_spin(wq);

	if (workq_thread_is_cooperative(uth)) {
		if (!workq_cooperative_allowance(wq, qos, uth, false)) {
			flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
		} else {
			/* In the future, when we have kevent hookups for the cooperative
			 * pool, we need fancier logic for what userspace should do. But
			 * right now, only userspace thread requests exist - so we'll just
			 * tell userspace to shuffle work items */
			flags |= PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE;
		}
	} else if (workq_thread_is_nonovercommit(uth)) {
		if (!workq_constrained_allowance(wq, qos, uth, false)) {
			flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
		}
	}
	workq_unlock(wq);

	WQ_TRACE(TRACE_wq_quantum_expiry_reevaluate, flags, 0, 0, 0);

	kevent_set_workq_quantum_expiry_user_tsd(proc, thread, flags);

	/* We have conveyed to userspace what it needs to do upon quantum
	 * expiry, now rearm the workqueue quantum */
	thread_arm_workqueue_quantum(get_machthread(uth));
}
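
/*
 * Userspace-side sketch (hypothetical handler; only the
 * PTHREAD_WQ_QUANTUM_EXPIRY_NARROW/_SHUFFLE flag names come from the
 * interface this function sets in the thread's TSD, the rest is assumed):
 *
 *	uint64_t flags = read_and_clear_quantum_tsd();   // hypothetical
 *	if (flags & PTHREAD_WQ_QUANTUM_EXPIRY_NARROW) {
 *		park_this_worker();                      // pool is too wide
 *	} else if (flags & PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE) {
 *		yield_to_other_work_items();             // rotate work items
 *	}
 */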
3330*94d3b452SApple OSS Distributions
3331*94d3b452SApple OSS Distributions void
workq_schedule_creator_turnstile_redrive(struct workqueue * wq,bool locked)3332*94d3b452SApple OSS Distributions workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked)
3333*94d3b452SApple OSS Distributions {
3334*94d3b452SApple OSS Distributions if (locked) {
3335*94d3b452SApple OSS Distributions workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_NONE);
3336*94d3b452SApple OSS Distributions } else {
3337*94d3b452SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
3338*94d3b452SApple OSS Distributions }
3339*94d3b452SApple OSS Distributions }
3340*94d3b452SApple OSS Distributions
static int
workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap,
    struct workqueue *wq)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	workq_threadreq_t kqr = uth->uu_kqr_bound;
	workq_threadreq_param_t trp = { };
	int nevents = uap->affinity, error;
	user_addr_t eventlist = uap->item;

	if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
	    (uth->uu_workq_flags & UT_WORKQ_DYING)) {
		return EINVAL;
	}

	if (eventlist && nevents && kqr == NULL) {
		return EINVAL;
	}

	/*
	 * Reset signal mask on the workqueue thread to default state,
	 * but do not touch any signals that are marked for preservation.
	 */
	sigset_t resettable = uth->uu_sigmask & ~p->p_workq_allow_sigmask;
	if (resettable != (sigset_t)~workq_threadmask) {
		proc_lock(p);
		uth->uu_sigmask |= ~workq_threadmask & ~p->p_workq_allow_sigmask;
		proc_unlock(p);
	}

	if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
		/*
		 * Ensure we store the threadreq param before unbinding
		 * the kqr from this thread.
		 */
		trp = kqueue_threadreq_workloop_param(kqr);
	}

	/*
	 * Freeze the base pri while we decide the fate of this thread.
	 *
	 * Either:
	 * - we return to user and kevent_cleanup will have unfrozen the base pri,
	 * - or we proceed to workq_select_threadreq_or_park_and_unlock() who will.
	 */
	thread_freeze_base_pri(th);

	if (kqr) {
		uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI | WQ_FLAG_THREAD_REUSE;
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
		} else {
			upcall_flags |= WQ_FLAG_THREAD_KEVENT;
		}
		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
		} else {
			if (workq_thread_is_overcommit(uth)) {
				upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
			}
			if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
				upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
			} else {
				upcall_flags |= uth->uu_workq_pri.qos_req |
				    WQ_FLAG_THREAD_PRIO_QOS;
			}
		}
		error = pthread_functions->workq_handle_stack_events(p, th,
		    get_task_map(proc_task(p)), uth->uu_workq_stackaddr,
		    uth->uu_workq_thport, eventlist, nevents, upcall_flags);
		if (error) {
			assert(uth->uu_kqr_bound == kqr);
			return error;
		}

		// pthread is supposed to pass KEVENT_FLAG_PARKING here
		// which should cause the above call to either:
		// - not return
		// - return an error
		// - return 0 and have unbound properly
		assert(uth->uu_kqr_bound == NULL);
	}

	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, uap->options, 0, 0);

	thread_sched_call(th, NULL);
	thread_will_park_or_terminate(th);
#if CONFIG_WORKLOOP_DEBUG
	UU_KEVENT_HISTORY_WRITE_ENTRY(uth, { .uu_error = -1, });
#endif

	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
	uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
	workq_select_threadreq_or_park_and_unlock(p, wq, uth,
	    WQ_SETUP_CLEAR_VOUCHER);
	__builtin_unreachable();
}

/**
 * Multiplexed call to interact with the workqueue mechanism
 */
int
workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
{
	int options = uap->options;
	int arg2 = uap->affinity;
	int arg3 = uap->prio;
	struct workqueue *wq = proc_get_wqptr(p);
	int error = 0;

	if ((p->p_lflag & P_LREGISTER) == 0) {
		return EINVAL;
	}

	switch (options) {
	case WQOPS_QUEUE_NEWSPISUPP: {
		/*
		 * arg2 = offset of serialno into dispatch queue
		 * arg3 = kevent support
		 */
		int offset = arg2;
		if (arg3 & 0x01) {
			// If we get here, then userspace has indicated support for kevent delivery.
		}

		p->p_dispatchqueue_serialno_offset = (uint64_t)offset;
		break;
	}
	case WQOPS_QUEUE_REQTHREADS: {
		/*
		 * arg2 = number of threads to start
		 * arg3 = priority
		 */
		error = workq_reqthreads(p, arg2, arg3, false);
		break;
	}
	/* For requesting threads for the cooperative pool */
	case WQOPS_QUEUE_REQTHREADS2: {
		/*
		 * arg2 = number of threads to start
		 * arg3 = priority
		 */
		error = workq_reqthreads(p, arg2, arg3, true);
		break;
	}
	case WQOPS_SET_EVENT_MANAGER_PRIORITY: {
		/*
		 * arg2 = priority for the manager thread
		 *
		 * if _PTHREAD_PRIORITY_SCHED_PRI_FLAG is set,
		 * the low bits of the value contain a scheduling priority
		 * instead of a QOS value
		 */
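		/*
		 * Illustrative only: a QoS-based value encodes a QoS class plus a
		 * relative priority in [THREAD_QOS_MIN_TIER_IMPORTANCE, 0], while a
		 * fixed-priority value would look like
		 * (47 | _PTHREAD_PRIORITY_SCHED_PRI_FLAG) to request sched pri 47.
		 */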
		pthread_priority_t pri = arg2;

		if (wq == NULL) {
			error = EINVAL;
			break;
		}

		/*
		 * Normalize the incoming priority so that it is ordered numerically.
		 */
		if (_pthread_priority_has_sched_pri(pri)) {
			pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK |
			    _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
		} else {
			thread_qos_t qos = _pthread_priority_thread_qos(pri);
			int relpri = _pthread_priority_relpri(pri);
			if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE ||
			    qos == THREAD_QOS_UNSPECIFIED) {
				error = EINVAL;
				break;
			}
			pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
		}
		/*
		 * If userspace passes a scheduling priority, that wins over any QoS.
		 * Userspace should take care not to lower the priority this way.
		 */
		workq_lock_spin(wq);
		if (wq->wq_event_manager_priority < (uint32_t)pri) {
			wq->wq_event_manager_priority = (uint32_t)pri;
		}
		workq_unlock(wq);
		break;
	}
	case WQOPS_THREAD_KEVENT_RETURN:
	case WQOPS_THREAD_WORKLOOP_RETURN:
	case WQOPS_THREAD_RETURN: {
		error = workq_thread_return(p, uap, wq);
		break;
	}

	case WQOPS_SHOULD_NARROW: {
		/*
		 * arg2 = priority to test
		 * arg3 = unused
		 */
		thread_t th = current_thread();
		struct uthread *uth = get_bsdthread_info(th);
		if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
		    (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) {
			error = EINVAL;
			break;
		}

		thread_qos_t qos = _pthread_priority_thread_qos(arg2);
		if (qos == THREAD_QOS_UNSPECIFIED) {
			error = EINVAL;
			break;
		}
		workq_lock_spin(wq);
		bool should_narrow = !workq_constrained_allowance(wq, qos, uth, false);
		workq_unlock(wq);

		*retval = should_narrow;
		break;
	}
	case WQOPS_SETUP_DISPATCH: {
		/*
		 * item = pointer to workq_dispatch_config structure
		 * arg2 = sizeof(item)
		 */
		struct workq_dispatch_config cfg;
		bzero(&cfg, sizeof(cfg));

		error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
		if (error) {
			break;
		}

		if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
		    cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
			error = ENOTSUP;
			break;
		}

		/* Load fields from version 1 */
		p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;

		/* Load fields from version 2 */
		if (cfg.wdc_version >= 2) {
			p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
		}

		break;
	}
	default:
		error = EINVAL;
		break;
	}

	return error;
}

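/*
 * Illustrative only, assuming a userspace stub whose arguments mirror
 * workq_kernreturn_args (options, item, affinity, prio):
 *
 *	// ask the kernel for two constrained worker threads at priority `pp`
 *	__workq_kernreturn(WQOPS_QUEUE_REQTHREADS, NULL, 2, (int)pp);
 *
 * where `pp` is a pthread_priority_t. The WQOPS_THREAD_*RETURN operations
 * are used on the way back from an upcall and normally do not return.
 */
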
/*
 * We have no work to do, park ourselves on the idle list.
 *
 * Consumes the workqueue lock and does not return.
 */
__attribute__((noreturn, noinline))
static void
workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth,
    uint32_t setup_flags)
{
	assert(uth == current_uthread());
	assert(uth->uu_kqr_bound == NULL);
	workq_push_idle_thread(p, wq, uth, setup_flags); // may not return

	workq_thread_reset_cpupercent(NULL, uth);

#if CONFIG_PREADOPT_TG
	/* Clear the preadoption thread group on the thread.
	 *
	 * Case 1:
	 * Creator thread which never picked up a thread request. We set a
	 * preadoption thread group on creator threads but if it never picked
	 * up a thread request and didn't go to userspace, then the thread will
	 * park with a preadoption thread group but no explicitly adopted
	 * voucher or work interval.
	 *
	 * We drop the preadoption thread group here before proceeding to park.
	 * Note - we may get preempted when we drop the workq lock below.
	 *
	 * Case 2:
	 * Thread picked up a thread request and bound to it and returned back
	 * from userspace and is parking. At this point, preadoption thread
	 * group should be NULL since the thread has unbound from the thread
	 * request. So this operation should be a no-op.
	 */
	thread_set_preadopt_thread_group(get_machthread(uth), NULL);
#endif

	if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) &&
	    !(uth->uu_workq_flags & UT_WORKQ_DYING)) {
		workq_unlock(wq);

		/*
		 * workq_push_idle_thread() will unset `has_stack`
		 * if it wants us to free the stack before parking.
		 */
		if (!uth->uu_save.uus_workq_park_data.has_stack) {
			pthread_functions->workq_markfree_threadstack(p,
			    get_machthread(uth), get_task_map(proc_task(p)),
			    uth->uu_workq_stackaddr);
		}

		/*
		 * When we remove the voucher from the thread, we may lose our importance
		 * causing us to get preempted, so we do this after putting the thread on
		 * the idle list. Then, when we get our importance back we'll be able to
		 * use this thread from e.g. the kevent call out to deliver a boosting
		 * message.
		 *
		 * Note that setting the voucher to NULL will not clear the preadoption
		 * thread group since this thread could have become the creator again and
		 * perhaps acquired a preadoption thread group.
		 */
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);

		workq_lock_spin(wq);
		uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
		setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
	}

	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);

	if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
		/*
		 * While we'd dropped the lock to unset our voucher, someone came
		 * around and made us runnable. But because we weren't waiting on the
		 * event their thread_wakeup() was ineffectual. To correct for that,
		 * we just run the continuation ourselves.
		 */
		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
		__builtin_unreachable();
	}

	if (uth->uu_workq_flags & UT_WORKQ_DYING) {
		workq_unpark_for_death_and_unlock(p, wq, uth,
		    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, setup_flags);
		__builtin_unreachable();
	}

	/* Disarm the workqueue quantum since the thread is now idle */
	thread_disarm_workqueue_quantum(get_machthread(uth));

	thread_set_pending_block_hint(get_machthread(uth), kThreadWaitParkedWorkQueue);
	assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
	workq_unlock(wq);
	thread_block(workq_unpark_continue);
	__builtin_unreachable();
}

static inline bool
workq_may_start_event_mgr_thread(struct workqueue *wq, struct uthread *uth)
{
	/*
	 * There's an event manager request and either:
	 * - no event manager currently running
	 * - we are re-using the event manager
	 */
	return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 ||
	       (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
}

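/*
 * Admission control for the constrained pool: returns how many additional
 * constrained threads may be scheduled at `at_qos` (0 means none). A sketch
 * of the math, assuming the counts stay stable under the workqueue lock:
 *
 *	slots = wq_max_constrained_threads - constrained_threads_scheduled
 *	room  = wq_max_parallelism[at_qos] - (active + busy at or above at_qos)
 *	allowance = (slots == 0 || room <= 0) ? 0 : MIN(room, slots)
 *
 * e.g. with a parallelism of 8, 5 active + 2 busy threads, and 60 of 64
 * constrained slots used: MIN(8 - 7, 64 - 60) = 1 more thread is admitted.
 */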
static uint32_t
workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
    struct uthread *uth, bool may_start_timer)
{
	assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
	uint32_t count = 0;

	uint32_t max_count = wq->wq_constrained_threads_scheduled;
	if (uth && workq_thread_is_nonovercommit(uth)) {
		/*
		 * don't count the current thread as scheduled
		 */
		assert(max_count > 0);
		max_count--;
	}
	if (max_count >= wq_max_constrained_threads) {
		WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
		    wq->wq_constrained_threads_scheduled,
		    wq_max_constrained_threads);
		/*
		 * we need 1 or more constrained threads to return to the kernel before
		 * we can dispatch additional work
		 */
		return 0;
	}
	/* remaining number of constrained threads we may schedule */
	max_count = wq_max_constrained_threads - max_count;

	/*
	 * Compute a metric for how many threads are active. We find the
	 * highest priority request outstanding and then add up the number of active
	 * threads in that and all higher-priority buckets. We'll also add any
	 * "busy" threads which are not currently active but blocked recently enough
	 * that we can't be sure that they won't be unblocked soon and start
	 * being active again.
	 *
	 * We'll then compare this metric to our max concurrency to decide whether
	 * to add a new thread.
	 */

	uint32_t busycount, thactive_count;

	thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
	    at_qos, &busycount, NULL);

	if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
	    at_qos <= uth->uu_workq_pri.qos_bucket) {
		/*
		 * Don't count this thread as currently active, but only if it's not
		 * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
		 * managers.
		 */
		assert(thactive_count > 0);
		thactive_count--;
	}

	count = wq_max_parallelism[_wq_bucket(at_qos)];
	if (count > thactive_count + busycount) {
		count -= thactive_count + busycount;
		WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
		    thactive_count, busycount);
		return MIN(count, max_count);
	} else {
		WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
		    thactive_count, busycount);
	}

	if (may_start_timer) {
		/*
		 * If this is called from the add timer, we won't have another timer
		 * fire when the thread exits the "busy" state, so rearm the timer.
		 */
		workq_schedule_delayed_thread_creation(wq, 0);
	}

	return 0;
}

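/*
 * Admission check for a thread request: manager requests go through the
 * event manager check, cooperative and constrained (non-overcommit)
 * requests go through their pool-specific allowance, and overcommit
 * requests are always admissible.
 */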
static bool
workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req)
{
	if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
		return workq_may_start_event_mgr_thread(wq, uth);
	}
	if (workq_threadreq_is_cooperative(req)) {
		return workq_cooperative_allowance(wq, req->tr_qos, uth, true);
	}
	if (workq_threadreq_is_nonovercommit(req)) {
		return workq_constrained_allowance(wq, req->tr_qos, uth, true);
	}

	return true;
}

/*
 * Called from the context of selecting thread requests for threads returning
 * from userspace, or for the creator thread
 */
static workq_threadreq_t
workq_cooperative_queue_best_req(struct workqueue *wq, struct uthread *uth)
{
	workq_lock_held(wq);

	/*
	 * If the current thread is cooperative, we need to exclude it from the
	 * cooperative schedule count since this thread is looking for a new
	 * request. A change in the schedule count for the cooperative pool
	 * therefore requires us to reevaluate the next best request for it.
	 */
	if (uth && workq_thread_is_cooperative(uth)) {
		_wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);

		(void) _wq_cooperative_queue_refresh_best_req_qos(wq);

		_wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
	} else {
		/*
		 * The old value that was already precomputed should be safe to use -
		 * add an assert that asserts that the best req QoS doesn't change in
		 * this case
		 */
		assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
	}

	thread_qos_t qos = wq->wq_cooperative_queue_best_req_qos;

	/* There are no eligible requests in the cooperative pool */
	if (qos == THREAD_QOS_UNSPECIFIED) {
		return NULL;
	}
	assert(qos != WORKQ_THREAD_QOS_ABOVEUI);
	assert(qos != WORKQ_THREAD_QOS_MANAGER);

	uint8_t bucket = _wq_bucket(qos);
	assert(!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket]));

	return STAILQ_FIRST(&wq->wq_cooperative_queue[bucket]);
}

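/*
 * Pick the best thread request for the creator to service. Precedence, as
 * implemented below: the event manager beats the QoS world; otherwise the
 * best sched-pri request from the special queue is weighed against the
 * best QoS request, where cooperative wins QoS ties over overcommit and
 * both cooperative and constrained requests must pass admission.
 */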
static workq_threadreq_t
workq_threadreq_select_for_creator(struct workqueue *wq)
{
	workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
	thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
	uint8_t pri = 0;

	/*
	 * Compute the best priority request, and ignore the turnstile for now
	 */

	req_pri = priority_queue_max(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_pri) {
		pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
		    &req_pri->tr_entry);
	}

	/*
	 * Handle the manager thread request. The special queue might yield
	 * a higher priority, but the manager always beats the QoS world.
	 */

	req_mgr = wq->wq_event_manager_threadreq;
	if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;

		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		} else {
			mgr_pri = thread_workq_pri_for_qos(
				_pthread_priority_thread_qos(mgr_pri));
		}

		return mgr_pri >= pri ? req_mgr : req_pri;
	}

	/*
	 * Compute the best QoS Request, and check whether it beats the "pri" one
	 *
	 * Start by comparing the overcommit and the cooperative pool
	 */
	req_qos = priority_queue_max(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_qos) {
		qos = req_qos->tr_qos;
	}

	req_tmp = workq_cooperative_queue_best_req(wq, NULL);
	if (req_tmp && qos <= req_tmp->tr_qos) {
		/*
		 * Cooperative TR is better between overcommit and cooperative. Note
		 * that if the QoS is the same between overcommit and cooperative,
		 * we choose cooperative.
		 *
		 * Pick the cooperative pool if it passes the admission check
		 */
		if (workq_cooperative_allowance(wq, req_tmp->tr_qos, NULL, true)) {
			req_qos = req_tmp;
			qos = req_qos->tr_qos;
		}
	}

	/*
	 * Compare the best QoS so far - either from overcommit or from cooperative
	 * pool - and compare it with the constrained pool
	 */
	req_tmp = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);

	if (req_tmp && qos < req_tmp->tr_qos) {
		/*
		 * Constrained pool is best in QoS between overcommit, cooperative
		 * and constrained. Now check how it fares against the priority case
		 */
		if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
			return req_pri;
		}

		if (workq_constrained_allowance(wq, req_tmp->tr_qos, NULL, true)) {
			/*
			 * If the constrained thread request is the best one and passes
			 * the admission check, pick it.
			 */
			return req_tmp;
		}
	}

	/*
	 * Compare the best of the QoS world with the priority
	 */
	if (pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
		return req_pri;
	}

	if (req_qos) {
		return req_qos;
	}

	/*
	 * If we had no eligible request but we have a turnstile push,
	 * it must be a non overcommit thread request that failed
	 * the admission check.
	 *
	 * Just fake a BG thread request so that if the push stops the creator
	 * priority just drops to 4.
	 */
	if (turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, NULL)) {
		static struct workq_threadreq_s workq_sync_push_fake_req = {
			.tr_qos = THREAD_QOS_BACKGROUND,
		};

		return &workq_sync_push_fake_req;
	}

	return NULL;
}

/*
 * Returns true if this caused a change in the schedule counts of the
 * cooperative pool
 */
static bool
workq_adjust_cooperative_constrained_schedule_counts(struct workqueue *wq,
    struct uthread *uth, thread_qos_t old_thread_qos, workq_tr_flags_t tr_flags)
{
	workq_lock_held(wq);

	/*
	 * Row: thread type
	 * Column: Request type
	 *
	 *                  overcommit    non-overcommit    cooperative
	 * overcommit       X             case 1            case 2
	 * cooperative      case 3        case 4            case 5
	 * non-overcommit   case 6        X                 case 7
	 *
	 * Move the thread to the right bucket depending on what state it
	 * currently has and what state the thread request it picks is going
	 * to have.
	 *
	 * Note that the creator thread is an overcommit thread.
	 */
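	/*
	 * For example, a creator thread (always overcommit) that binds to a
	 * cooperative thread request falls into case 2 below: the cooperative
	 * scheduled count for the request's QoS goes up, so the pool's best
	 * request must be refreshed.
	 */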
	thread_qos_t new_thread_qos = uth->uu_workq_pri.qos_req;

	/*
	 * Anytime a cooperative bucket's schedule count changes, we need to
	 * potentially refresh the next best QoS for that pool when we determine
	 * the next request for the creator
	 */
	bool cooperative_pool_sched_count_changed = false;

	if (workq_thread_is_overcommit(uth)) {
		if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 1: thread is overcommit, req is non-overcommit
			wq->wq_constrained_threads_scheduled++;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 2: thread is overcommit, req is cooperative
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
	} else if (workq_thread_is_cooperative(uth)) {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 3: thread is cooperative, req is overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
		} else if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 4: thread is cooperative, req is non-overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			wq->wq_constrained_threads_scheduled++;
		} else {
			// Case 5: thread is cooperative, req is also cooperative
			assert(workq_tr_is_cooperative(tr_flags));
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
		}
		cooperative_pool_sched_count_changed = true;
	} else {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 6: Thread is non-overcommit, req is overcommit
			wq->wq_constrained_threads_scheduled--;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 7: Thread is non-overcommit, req is cooperative
			wq->wq_constrained_threads_scheduled--;
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
	}

	return cooperative_pool_sched_count_changed;
}

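/*
 * Like workq_threadreq_select_for_creator(), but for a real thread picking
 * up work: it additionally considers the kqworkloop request of the max
 * turnstile proprietor, and treats the creator as an anonymous thread so
 * its own state doesn't skew the admission checks.
 */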
static workq_threadreq_t
workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
{
	workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
	uintptr_t proprietor;
	thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
	uint8_t pri = 0;

	if (uth == wq->wq_creator) {
		uth = NULL;
	}

	/*
	 * Compute the best priority request (special or turnstile)
	 */

	pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
	    &proprietor);
	if (pri) {
		struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
		req_pri = &kqwl->kqwl_request;
		if (req_pri->tr_state != WORKQ_TR_STATE_QUEUED) {
			panic("Invalid thread request (%p) state %d",
			    req_pri, req_pri->tr_state);
		}
	} else {
		req_pri = NULL;
	}

	req_tmp = priority_queue_max(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
	    &req_tmp->tr_entry)) {
		req_pri = req_tmp;
		pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
		    &req_tmp->tr_entry);
	}

	/*
	 * Handle the manager thread request. The special queue might yield
	 * a higher priority, but the manager always beats the QoS world.
	 */

	req_mgr = wq->wq_event_manager_threadreq;
	if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;

		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		} else {
			mgr_pri = thread_workq_pri_for_qos(
				_pthread_priority_thread_qos(mgr_pri));
		}

		return mgr_pri >= pri ? req_mgr : req_pri;
	}

	/*
	 * Compute the best QoS Request, and check whether it beats the "pri" one
	 */

	req_qos = priority_queue_max(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_qos) {
		qos = req_qos->tr_qos;
	}

	req_tmp = workq_cooperative_queue_best_req(wq, uth);
	if (req_tmp && qos <= req_tmp->tr_qos) {
		/*
		 * Cooperative TR is better between overcommit and cooperative. Note
		 * that if the QoS is the same between overcommit and cooperative,
		 * we choose cooperative.
		 *
		 * Pick the cooperative pool if it passes the admission check
		 */
		if (workq_cooperative_allowance(wq, req_tmp->tr_qos, uth, true)) {
			req_qos = req_tmp;
			qos = req_qos->tr_qos;
		}
	}

	/*
	 * Compare the best QoS so far - either from overcommit or from cooperative
	 * pool - and compare it with the constrained pool
	 */
	req_tmp = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);

	if (req_tmp && qos < req_tmp->tr_qos) {
		/*
		 * Constrained pool is best in QoS between overcommit, cooperative
		 * and constrained. Now check how it fares against the priority case
		 */
		if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
			return req_pri;
		}

		if (workq_constrained_allowance(wq, req_tmp->tr_qos, uth, true)) {
			/*
			 * If the constrained thread request is the best one and passes
			 * the admission check, pick it.
			 */
			return req_tmp;
		}
	}

	if (req_pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
		return req_pri;
	}

	return req_qos;
}

/*
 * The creator is an anonymous thread that is counted as scheduled but runs
 * with no scheduler callback set and is not tracked as active; its job is
 * to make other threads.
 *
 * When more requests are added or an existing one is hurried along,
 * a creator is elected and set up, or the existing one is overridden
 * accordingly.
 *
 * While this creator is in flight, because no request has been dequeued,
 * already running threads have a chance at stealing thread requests, avoiding
 * useless context switches, and the creator, once scheduled, may not find any
 * work to do and will then just park again.
 *
 * The creator serves the dual purpose of informing the scheduler of work that
 * hasn't been materialized as threads yet, and of acting as a natural pacing
 * mechanism for thread creation.
 *
 * Being anonymous (and not bound to anything) means that thread requests
 * can be stolen from this creator by threads already on core, yielding more
 * efficient scheduling and fewer context switches.
 */
4177*94d3b452SApple OSS Distributions static void
workq_schedule_creator(proc_t p,struct workqueue * wq,workq_kern_threadreq_flags_t flags)4178*94d3b452SApple OSS Distributions workq_schedule_creator(proc_t p, struct workqueue *wq,
4179*94d3b452SApple OSS Distributions workq_kern_threadreq_flags_t flags)
4180*94d3b452SApple OSS Distributions {
4181*94d3b452SApple OSS Distributions workq_threadreq_t req;
4182*94d3b452SApple OSS Distributions struct uthread *uth;
4183*94d3b452SApple OSS Distributions bool needs_wakeup;
4184*94d3b452SApple OSS Distributions
4185*94d3b452SApple OSS Distributions workq_lock_held(wq);
4186*94d3b452SApple OSS Distributions assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);
4187*94d3b452SApple OSS Distributions
4188*94d3b452SApple OSS Distributions again:
4189*94d3b452SApple OSS Distributions uth = wq->wq_creator;
4190*94d3b452SApple OSS Distributions
4191*94d3b452SApple OSS Distributions if (!wq->wq_reqcount) {
4192*94d3b452SApple OSS Distributions /*
4193*94d3b452SApple OSS Distributions * There is no thread request left.
4194*94d3b452SApple OSS Distributions *
4195*94d3b452SApple OSS Distributions * If there is a creator, leave everything in place, so that it cleans
4196*94d3b452SApple OSS Distributions * up itself in workq_push_idle_thread().
4197*94d3b452SApple OSS Distributions *
4198*94d3b452SApple OSS Distributions * Else, make sure the turnstile state is reset to no inheritor.
4199*94d3b452SApple OSS Distributions */
4200*94d3b452SApple OSS Distributions if (uth == NULL) {
4201*94d3b452SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
4202*94d3b452SApple OSS Distributions }
4203*94d3b452SApple OSS Distributions return;
4204*94d3b452SApple OSS Distributions }
4205*94d3b452SApple OSS Distributions
4206*94d3b452SApple OSS Distributions req = workq_threadreq_select_for_creator(wq);
4207*94d3b452SApple OSS Distributions if (req == NULL) {
4208*94d3b452SApple OSS Distributions /*
4209*94d3b452SApple OSS Distributions * There isn't a thread request that passes the admission check.
4210*94d3b452SApple OSS Distributions *
4211*94d3b452SApple OSS Distributions * If there is a creator, do not touch anything, the creator will sort
4212*94d3b452SApple OSS Distributions * it out when it runs.
4213*94d3b452SApple OSS Distributions *
4214*94d3b452SApple OSS Distributions * Else, set the inheritor to "WORKQ" so that the turnstile propagation
4215*94d3b452SApple OSS Distributions * code calls us if anything changes.
4216*94d3b452SApple OSS Distributions */
4217*94d3b452SApple OSS Distributions if (uth == NULL) {
4218*94d3b452SApple OSS Distributions workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
4219*94d3b452SApple OSS Distributions }
4220*94d3b452SApple OSS Distributions return;
4221*94d3b452SApple OSS Distributions }

	if (uth) {
		/*
		 * We may need to adjust the priority of the creator we already have.
		 */
		if (workq_thread_needs_priority_change(req, uth)) {
			WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
			    wq, 1, uthread_tid(uth), req->tr_qos);
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		}
		assert(wq->wq_inheritor == get_machthread(uth));
	} else if (wq->wq_thidlecount) {
		/*
		 * We need to unpark a creator thread.
		 */
		wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
		    &needs_wakeup);
		/* Always reset the priorities on the newly chosen creator */
		workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		workq_turnstile_update_inheritor(wq, get_machthread(uth),
		    TURNSTILE_INHERITOR_THREAD);
		WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
		    wq, 2, uthread_tid(uth), req->tr_qos);
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields = 0;
		if (needs_wakeup) {
			workq_thread_wakeup(uth);
		}
	} else {
		/*
		 * We need to allocate a thread...
		 */
		if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
			/* out of threads, just go away */
			flags = WORKQ_THREADREQ_NONE;
		} else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
			act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
		} else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
			/* This can drop the workqueue lock, and take it again */
			workq_schedule_immediate_thread_creation(wq);
		} else if (workq_add_new_idle_thread(p, wq)) {
			goto again;
		} else {
			workq_schedule_delayed_thread_creation(wq, 0);
		}

		/*
		 * If the current thread is the inheritor:
		 *
		 * If we set the AST, then the thread will stay the inheritor until
		 * either the AST calls workq_kern_threadreq_redrive(), or it parks
		 * and calls workq_push_idle_thread().
		 *
		 * Otherwise, responsibility for the thread creation lies with a
		 * thread call, and we need to clear the inheritor.
		 */
		if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
		    wq->wq_inheritor == current_thread()) {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
	}
}
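
/*
 * Illustrative sketch (not part of the build): the creator decision ladder
 * above coarsely reduces to the hypothetical helper below, with inputs that
 * mirror the state inspected under the workqueue lock. The real code further
 * splits MAKE_THREAD into AST-redrive, immediate and delayed creation paths.
 */
#if 0
enum creator_action { KEEP_CREATOR, UNPARK_IDLE, MAKE_THREAD, DEFER };

static enum creator_action
creator_decision(bool have_creator, uint32_t thidlecount,
    uint32_t nthreads, uint32_t max_threads)
{
	if (have_creator) {
		return KEEP_CREATOR;    /* possibly repriced, never replaced here */
	}
	if (thidlecount > 0) {
		return UNPARK_IDLE;     /* cheapest: reuse a parked idle thread */
	}
	if (nthreads >= max_threads) {
		return DEFER;           /* out of threads, just go away */
	}
	return MAKE_THREAD;             /* allocate a brand new idle thread */
}
#endif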

/**
 * Same as workq_unpark_select_threadreq_or_park_and_unlock,
 * but does not allow early binds.
 *
 * Called with the base pri frozen; will unfreeze it.
 */
__attribute__((noreturn, noinline))
static void
workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags)
{
	workq_threadreq_t req = NULL;
	bool is_creator = (wq->wq_creator == uth);
	bool schedule_creator = false;

	if (__improbable(_wq_exiting(wq))) {
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0);
		goto park;
	}

	if (wq->wq_reqcount == 0) {
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0);
		goto park;
	}

	req = workq_threadreq_select(wq, uth);
	if (__improbable(req == NULL)) {
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0);
		goto park;
	}

	struct uu_workq_policy old_pri = uth->uu_workq_pri;
	uint8_t tr_flags = req->tr_flags;
	struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);

	/*
	 * Attempt to set ourselves up as the new thing to run, moving all
	 * priority pushes to ourselves.
	 *
	 * If the current thread is the creator, then the fact that we are
	 * presently running is proof that we'll do something useful, so keep
	 * going.
	 *
	 * For other cases, peek at the AST to know whether the scheduler wants
	 * to preempt us; if yes, park instead, and move the thread request
	 * turnstile back to the workqueue.
	 */
	if (req_ts) {
		workq_perform_turnstile_operation_locked(wq, ^{
			turnstile_update_inheritor(req_ts, get_machthread(uth),
			    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
			turnstile_update_inheritor_complete(req_ts,
			    TURNSTILE_INTERLOCK_HELD);
		});
	}

	/*
	 * Accounting changes of the aggregate thscheduled_count and thactive,
	 * which have to be paired with the workq_thread_reset_pri() below so
	 * that uth->uu_workq_pri matches thactive.
	 *
	 * This is undone when the thread parks.
	 */
	if (is_creator) {
		WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
		    uth->uu_save.uus_workq_park_data.yields);
		wq->wq_creator = NULL;
		_wq_thactive_inc(wq, req->tr_qos);
		wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
	} else if (old_pri.qos_bucket != req->tr_qos) {
		_wq_thactive_move(wq, old_pri.qos_bucket, req->tr_qos);
	}
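	/*
	 * (For context: the _wq_thactive_* helpers operate on a packed
	 * per-QoS-bucket count of active threads, so the inc/move above keeps
	 * the bucket counts consistent with the priority reset that follows.)
	 */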
	workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);

	/*
	 * Make relevant accounting changes for pool-specific counts.
	 *
	 * The schedule counts changing can affect what the next best request
	 * for the cooperative thread pool is if this request is dequeued.
	 */
	bool cooperative_sched_count_changed =
	    workq_adjust_cooperative_constrained_schedule_counts(wq, uth,
	    old_pri.qos_req, tr_flags);

	if (workq_tr_is_overcommit(tr_flags)) {
		workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
	} else if (workq_tr_is_cooperative(tr_flags)) {
		workq_thread_set_type(uth, UT_WORKQ_COOPERATIVE);
	} else {
		workq_thread_set_type(uth, 0);
	}

	if (__improbable(thread_unfreeze_base_pri(get_machthread(uth)) && !is_creator)) {
		if (req_ts) {
			workq_perform_turnstile_operation_locked(wq, ^{
				turnstile_update_inheritor(req_ts, wq->wq_turnstile,
				    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
				turnstile_update_inheritor_complete(req_ts,
				    TURNSTILE_INTERLOCK_HELD);
			});
		}
		WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0);
		goto park_thawed;
	}

	/*
	 * We passed all checks, dequeue the request, bind to it, and set it up
	 * to return to userspace.
	 */
	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
	    workq_trace_req_id(req), tr_flags, 0);
	wq->wq_fulfilled++;
	schedule_creator = workq_threadreq_dequeue(wq, req,
	    cooperative_sched_count_changed);

	workq_thread_reset_cpupercent(req, uth);

	if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
		kqueue_threadreq_bind_prepost(p, req, uth);
		req = NULL;
	} else if (req->tr_count > 0) {
		req = NULL;
	}

	if (uth->uu_workq_flags & UT_WORKQ_NEW) {
		uth->uu_workq_flags ^= UT_WORKQ_NEW;
		setup_flags |= WQ_SETUP_FIRST_USE;
	}

	/*
	 * If one of the following is true, call workq_schedule_creator()
	 * (which also adjusts the priority of an existing creator):
	 *
	 * - We are the creator, so the workqueue may need a new creator.
	 * - The request we're binding to is the highest-priority one; the
	 *   existing creator's priority might need to be adjusted to reflect
	 *   the next highest thread request.
	 */
	if (is_creator || schedule_creator) {
		/* This can drop the workqueue lock, and take it again */
		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
	}

	workq_unlock(wq);

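	/*
	 * If "req" is still non-NULL here, it was a non-kevent, non-workloop
	 * request whose tr_count dropped to zero, so we own it exclusively
	 * and can free it now that the workqueue lock has been dropped.
	 */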
	if (req) {
		zfree(workq_zone_threadreq, req);
	}

	/*
	 * Run Thread, Run!
	 */
	uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
	if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
		upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
	} else if (workq_tr_is_overcommit(tr_flags)) {
		upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
	} else if (workq_tr_is_cooperative(tr_flags)) {
		upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
	}
	if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
		upcall_flags |= WQ_FLAG_THREAD_KEVENT;
		assert((upcall_flags & WQ_FLAG_THREAD_COOPERATIVE) == 0);
	}

	if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
		upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
	}
	uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;

	if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
		kqueue_threadreq_bind_commit(p, get_machthread(uth));
	} else {
#if CONFIG_PREADOPT_TG
		/*
		 * The thread may have a preadopt thread group on it already
		 * because it got tagged with it as a creator thread. Make sure
		 * to clear that, since we don't have preadoption for anonymous
		 * thread requests.
		 */
		thread_set_preadopt_thread_group(get_machthread(uth), NULL);
#endif
	}

	workq_setup_and_run(p, uth, setup_flags);
	__builtin_unreachable();

park:
	thread_unfreeze_base_pri(get_machthread(uth));
park_thawed:
	workq_park_and_unlock(p, wq, uth, setup_flags);
}
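
/*
 * Illustrative sketch (not part of the build): the upcall flag lattice
 * computed in workq_select_threadreq_or_park_and_unlock() distills to the
 * hypothetical pure helper below. Manager threads take precedence, kevent
 * and workloop requests both surface WQ_FLAG_THREAD_KEVENT, and the
 * cooperative bit is never combined with kevent.
 */
#if 0
static uint32_t
upcall_flags_for(bool is_manager, bool is_overcommit, bool is_cooperative,
    bool is_kevent, bool is_workloop)
{
	uint32_t flags = WQ_FLAG_THREAD_NEWSPI;
	if (is_manager) {
		flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
	} else if (is_overcommit) {
		flags |= WQ_FLAG_THREAD_OVERCOMMIT;
	} else if (is_cooperative) {
		flags |= WQ_FLAG_THREAD_COOPERATIVE;
	}
	if (is_kevent) {
		flags |= WQ_FLAG_THREAD_KEVENT;
	}
	if (is_workloop) {
		flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
	}
	return flags;
}
#endif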

/**
 * Runs a thread request on a thread.
 *
 * - If thread is THREAD_NULL, will find a thread and run the request there.
 *   Otherwise, the thread must be the current thread.
 *
 * - If req is NULL, will find the highest priority request and run that. If
 *   it is not NULL, it must be a threadreq object in state NEW. If it cannot
 *   be run immediately, it will be enqueued and moved to state QUEUED.
 *
 * Either way, the thread request object serviced will be moved to state
 * BINDING and attached to the uthread.
 *
 * Should be called with the workqueue lock held. Will drop it.
 * Should be called with the base pri not frozen.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags)
{
	if (uth->uu_workq_flags & UT_WORKQ_EARLY_BOUND) {
		if (uth->uu_workq_flags & UT_WORKQ_NEW) {
			setup_flags |= WQ_SETUP_FIRST_USE;
		}
		uth->uu_workq_flags &= ~(UT_WORKQ_NEW | UT_WORKQ_EARLY_BOUND);
		/*
		 * This pointer is possibly freed and only used for tracing purposes.
		 */
		workq_threadreq_t req = uth->uu_save.uus_workq_park_data.thread_request;
		workq_unlock(wq);
		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
		    VM_KERNEL_ADDRHIDE(req), 0, 0);
		(void)req;

		workq_setup_and_run(p, uth, setup_flags);
		__builtin_unreachable();
	}

	thread_freeze_base_pri(get_machthread(uth));
	workq_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
}

static bool
workq_creator_should_yield(struct workqueue *wq, struct uthread *uth)
{
	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);

	if (qos >= THREAD_QOS_USER_INTERACTIVE) {
		return false;
	}

	uint32_t snapshot = uth->uu_save.uus_workq_park_data.fulfilled_snapshot;
	if (wq->wq_fulfilled == snapshot) {
		return false;
	}

	uint32_t cnt = 0, conc = wq_max_parallelism[_wq_bucket(qos)];
	if (wq->wq_fulfilled - snapshot > conc) {
		/* we fulfilled more than NCPU requests since being dispatched */
		WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1,
		    wq->wq_fulfilled, snapshot);
		return true;
	}

	for (uint8_t i = _wq_bucket(qos); i < WORKQ_NUM_QOS_BUCKETS; i++) {
		cnt += wq->wq_thscheduled_count[i];
	}
	if (conc <= cnt) {
		/* We fulfilled requests and have more than NCPU scheduled threads */
		WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2,
		    wq->wq_fulfilled, snapshot);
		return true;
	}

	return false;
}
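
/*
 * A minimal user-space model of the first of the two yield triggers above
 * (illustrative only, not part of the build): the creator yields once it
 * observes more fulfillments than the bucket's parallelism ("conc", roughly
 * NCPU) since it was dispatched. Unsigned subtraction keeps the comparison
 * wrap-safe even if the fulfilled counter overflows.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
creator_fulfilled_enough(uint32_t fulfilled_now, uint32_t snapshot,
    uint32_t conc)
{
	return (uint32_t)(fulfilled_now - snapshot) > conc;
}
#endif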

/**
 * A parked thread wakes up.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	proc_t p = current_proc();
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);

	if (wq->wq_creator == uth && workq_creator_should_yield(wq, uth)) {
		/*
		 * If the number of threads we have out is able to keep up with
		 * the demand, then we should avoid sending this creator thread
		 * to userspace.
		 */
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields++;
		workq_unlock(wq);
		thread_yield_with_continuation(workq_unpark_continue, NULL);
		__builtin_unreachable();
	}

	if (__probable(uth->uu_workq_flags & UT_WORKQ_RUNNING)) {
		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, WQ_SETUP_NONE);
		__builtin_unreachable();
	}

	if (__probable(wr == THREAD_AWAKENED)) {
		/*
		 * We were set running, but for the purposes of dying.
		 */
		assert(uth->uu_workq_flags & UT_WORKQ_DYING);
		assert((uth->uu_workq_flags & UT_WORKQ_NEW) == 0);
	} else {
		/*
		 * Workaround for <rdar://problem/38647347>: in case we do hit
		 * userspace, make sure that calling workq_thread_terminate()
		 * does the right thing here, and that if we never call it,
		 * workq_exit() will too, because it sees this thread on the
		 * runlist.
		 */
		assert(wr == THREAD_INTERRUPTED);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
	}

	workq_unpark_for_death_and_unlock(p, wq, uth,
	    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, WQ_SETUP_NONE);
	__builtin_unreachable();
}
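
/*
 * Note: workq_unpark_continue() runs as a scheduler continuation, so a
 * parked thread resumes here on a fresh kernel stack instead of unwinding
 * its old one; every path above therefore ends in a call that does not
 * return.
 */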

__attribute__((noreturn, noinline))
static void
workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags)
{
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		/*
		 * For preemption reasons, we want to reset the voucher as late as
		 * possible, so we do it in two places:
		 *   - Just before parking (i.e. in workq_park_and_unlock())
		 *   - Prior to doing the setup for the next workitem (i.e. here)
		 *
		 * Those two places are sufficient to ensure we always reset it
		 * before it goes back out to userspace, but be careful not to
		 * break that guarantee.
		 *
		 * Note that setting the voucher to NULL will not clear the
		 * preadoption thread group on this thread.
		 */
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t upcall_flags = uth->uu_save.uus_workq_park_data.upcall_flags;
	if (!(setup_flags & WQ_SETUP_FIRST_USE)) {
		upcall_flags |= WQ_FLAG_THREAD_REUSE;
	}

	if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
		/*
		 * For threads that have an outside-of-QoS thread priority, indicate
		 * to userspace that setting QoS should only affect the TSD and not
		 * change QoS in the kernel.
		 */
		upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
	} else {
		/*
		 * Put the QoS class value into the lower bits of the reuse_thread
		 * register; this is where the thread priority used to be stored
		 * anyway.
		 */
		upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
		    WQ_FLAG_THREAD_PRIO_QOS;
	}

	if (uth->uu_workq_thport == MACH_PORT_NULL) {
		/* convert_thread_to_port_pinned() consumes a reference */
		thread_reference(th);
		/* Convert to an immovable/pinned thread port; the port is not pinned yet */
		ipc_port_t port = convert_thread_to_port_pinned(th);
		/* Atomically, pin and copy out the port */
		uth->uu_workq_thport = ipc_port_copyout_send_pinned(port, get_task_ipcspace(proc_task(p)));
	}

	/*
	 * The thread has been set up to run; arm its next workqueue quantum,
	 * or disarm it if the thread no longer supports that.
	 */
	if (thread_supports_cooperative_workqueue(th)) {
		thread_arm_workqueue_quantum(th);
	} else {
		thread_disarm_workqueue_quantum(th);
	}

	/*
	 * Call out to pthread; this sets up the thread, pulls in kevent structs
	 * onto the stack, sets up the thread state and then returns to userspace.
	 */
	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START,
	    proc_get_wqptr_fast(p), 0, 0, 0);

	if (workq_thread_is_cooperative(uth)) {
		thread_sched_call(th, NULL);
	} else {
		thread_sched_call(th, workq_sched_callback);
	}

	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, setup_flags, upcall_flags);

	__builtin_unreachable();
}

#pragma mark misc

int
fill_procworkqueue(proc_t p, struct proc_workqueueinfo *pwqinfo)
{
	struct workqueue *wq = proc_get_wqptr(p);
	int error = 0;
	int activecount;

	if (wq == NULL) {
		return EINVAL;
	}

	/*
	 * This is sometimes called from interrupt context by the kperf sampler.
	 * In that case, it's not safe to spin trying to take the lock since we
	 * might already hold it. So, we just try-lock it and error out if it's
	 * already held. Since this is just a debugging aid, and all our callers
	 * are able to handle an error, that's fine.
	 */
	bool locked = workq_lock_try(wq);
	if (!locked) {
		return EBUSY;
	}

	wq_thactive_t act = _wq_thactive(wq);
	activecount = _wq_thactive_aggregate_downto_qos(wq, act,
	    WORKQ_THREAD_QOS_MIN, NULL, NULL);
	if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) {
		activecount++;
	}
	pwqinfo->pwq_nthreads = wq->wq_nthreads;
	pwqinfo->pwq_runthreads = activecount;
	pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
	pwqinfo->pwq_state = 0;

	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
		pwqinfo->pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
	}

	if (wq->wq_nthreads >= wq_max_threads) {
		pwqinfo->pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
	}

	workq_unlock(wq);
	return error;
}
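
/*
 * A sketch of a caller of fill_procworkqueue() (illustrative only, not part
 * of the build), mirroring workqueue_get_pwq_exceeded() below: a nonzero
 * return means "no information available", not a hard failure.
 */
#if 0
struct proc_workqueueinfo info;
if (fill_procworkqueue(p, &info) == 0 &&
    (info.pwq_state & WQ_FLAGS_AVAILABLE)) {
	/* info.pwq_* fields are valid and self-consistent here */
}
#endif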

boolean_t
workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
    boolean_t *exceeded_constrained)
{
	proc_t p = v;
	struct proc_workqueueinfo pwqinfo;
	int err;

	assert(p != NULL);
	assert(exceeded_total != NULL);
	assert(exceeded_constrained != NULL);

	err = fill_procworkqueue(p, &pwqinfo);
	if (err) {
		return FALSE;
	}
	if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
		return FALSE;
	}

	*exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
	*exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);

	return TRUE;
}

uint32_t
workqueue_get_pwq_state_kdp(void *v)
{
	static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) ==
	    kTaskWqExceededConstrainedThreadLimit);
	static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) ==
	    kTaskWqExceededTotalThreadLimit);
	static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
	static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT |
	    WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7);

	if (v == NULL) {
		return 0;
	}

	proc_t p = v;
	struct workqueue *wq = proc_get_wqptr(p);

	if (wq == NULL || workq_lock_is_acquired_kdp(wq)) {
		return 0;
	}

	uint32_t pwq_state = WQ_FLAGS_AVAILABLE;

	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
		pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
	}

	if (wq->wq_nthreads >= wq_max_threads) {
		pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
	}

	return pwq_state;
}
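
/*
 * For context: the static_asserts above pin an ABI relationship in which
 * the kTaskWq* flags reported in task summaries are the WQ_* bits shifted
 * left by 17. A hypothetical round-trip would look like:
 */
#if 0
static uint32_t
pwq_state_to_task_flags(uint32_t pwq_state)
{
	return pwq_state << 17;  /* e.g. WQ_FLAGS_AVAILABLE -> kTaskWqFlagsAvailable */
}
#endif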

void
workq_init(void)
{
	clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
	    NSEC_PER_USEC, &wq_stalled_window.abstime);
	clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,
	    NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
	clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs,
	    NSEC_PER_USEC, &wq_max_timer_interval.abstime);

	thread_deallocate_daemon_register_queue(&workq_deallocate_queue,
	    workq_deallocate_queue_invoke);
}
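
/*
 * Note: the usecs -> mach absolute time conversions above are performed once
 * at init so that hot paths can compare raw mach_absolute_time() deltas
 * against the precomputed .abstime fields instead of converting an interval
 * on every check.
 */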