1*19c3b8c2SApple OSS Distributions /*
2*19c3b8c2SApple OSS Distributions * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3*19c3b8c2SApple OSS Distributions *
4*19c3b8c2SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*19c3b8c2SApple OSS Distributions *
6*19c3b8c2SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*19c3b8c2SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*19c3b8c2SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*19c3b8c2SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*19c3b8c2SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*19c3b8c2SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*19c3b8c2SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*19c3b8c2SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*19c3b8c2SApple OSS Distributions *
15*19c3b8c2SApple OSS Distributions * Please obtain a copy of the License at
16*19c3b8c2SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*19c3b8c2SApple OSS Distributions *
18*19c3b8c2SApple OSS Distributions * The Original Code and all software distributed under the License are
19*19c3b8c2SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*19c3b8c2SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*19c3b8c2SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*19c3b8c2SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*19c3b8c2SApple OSS Distributions * Please see the License for the specific language governing rights and
24*19c3b8c2SApple OSS Distributions * limitations under the License.
25*19c3b8c2SApple OSS Distributions *
26*19c3b8c2SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*19c3b8c2SApple OSS Distributions */
28*19c3b8c2SApple OSS Distributions /* Copyright (c) 1995-2018 Apple, Inc. All Rights Reserved */
29*19c3b8c2SApple OSS Distributions
30*19c3b8c2SApple OSS Distributions #include <sys/cdefs.h>
31*19c3b8c2SApple OSS Distributions
32*19c3b8c2SApple OSS Distributions #include <kern/assert.h>
33*19c3b8c2SApple OSS Distributions #include <kern/ast.h>
34*19c3b8c2SApple OSS Distributions #include <kern/clock.h>
35*19c3b8c2SApple OSS Distributions #include <kern/cpu_data.h>
36*19c3b8c2SApple OSS Distributions #include <kern/kern_types.h>
37*19c3b8c2SApple OSS Distributions #include <kern/policy_internal.h>
38*19c3b8c2SApple OSS Distributions #include <kern/processor.h>
39*19c3b8c2SApple OSS Distributions #include <kern/sched_prim.h> /* for thread_exception_return */
40*19c3b8c2SApple OSS Distributions #include <kern/task.h>
41*19c3b8c2SApple OSS Distributions #include <kern/thread.h>
42*19c3b8c2SApple OSS Distributions #include <kern/thread_group.h>
43*19c3b8c2SApple OSS Distributions #include <kern/zalloc.h>
44*19c3b8c2SApple OSS Distributions #include <mach/kern_return.h>
45*19c3b8c2SApple OSS Distributions #include <mach/mach_param.h>
46*19c3b8c2SApple OSS Distributions #include <mach/mach_port.h>
47*19c3b8c2SApple OSS Distributions #include <mach/mach_types.h>
48*19c3b8c2SApple OSS Distributions #include <mach/mach_vm.h>
49*19c3b8c2SApple OSS Distributions #include <mach/sync_policy.h>
50*19c3b8c2SApple OSS Distributions #include <mach/task.h>
51*19c3b8c2SApple OSS Distributions #include <mach/thread_act.h> /* for thread_resume */
52*19c3b8c2SApple OSS Distributions #include <mach/thread_policy.h>
53*19c3b8c2SApple OSS Distributions #include <mach/thread_status.h>
54*19c3b8c2SApple OSS Distributions #include <mach/vm_prot.h>
55*19c3b8c2SApple OSS Distributions #include <mach/vm_statistics.h>
56*19c3b8c2SApple OSS Distributions #include <machine/atomic.h>
57*19c3b8c2SApple OSS Distributions #include <machine/machine_routines.h>
58*19c3b8c2SApple OSS Distributions #include <machine/smp.h>
59*19c3b8c2SApple OSS Distributions #include <vm/vm_map.h>
60*19c3b8c2SApple OSS Distributions #include <vm/vm_protos.h>
61*19c3b8c2SApple OSS Distributions
62*19c3b8c2SApple OSS Distributions #include <sys/eventvar.h>
63*19c3b8c2SApple OSS Distributions #include <sys/kdebug.h>
64*19c3b8c2SApple OSS Distributions #include <sys/kernel.h>
65*19c3b8c2SApple OSS Distributions #include <sys/lock.h>
66*19c3b8c2SApple OSS Distributions #include <sys/param.h>
67*19c3b8c2SApple OSS Distributions #include <sys/proc_info.h> /* for fill_procworkqueue */
68*19c3b8c2SApple OSS Distributions #include <sys/proc_internal.h>
69*19c3b8c2SApple OSS Distributions #include <sys/pthread_shims.h>
70*19c3b8c2SApple OSS Distributions #include <sys/resourcevar.h>
71*19c3b8c2SApple OSS Distributions #include <sys/signalvar.h>
72*19c3b8c2SApple OSS Distributions #include <sys/sysctl.h>
73*19c3b8c2SApple OSS Distributions #include <sys/sysproto.h>
74*19c3b8c2SApple OSS Distributions #include <sys/systm.h>
75*19c3b8c2SApple OSS Distributions #include <sys/ulock.h> /* for ulock_owner_value_to_port_name */
76*19c3b8c2SApple OSS Distributions
77*19c3b8c2SApple OSS Distributions #include <pthread/bsdthread_private.h>
78*19c3b8c2SApple OSS Distributions #include <pthread/workqueue_syscalls.h>
79*19c3b8c2SApple OSS Distributions #include <pthread/workqueue_internal.h>
80*19c3b8c2SApple OSS Distributions #include <pthread/workqueue_trace.h>
81*19c3b8c2SApple OSS Distributions
82*19c3b8c2SApple OSS Distributions #include <os/log.h>
83*19c3b8c2SApple OSS Distributions
/*
 * Forward declarations for static routines whose definitions appear later
 * in this file.
 */

static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2;
static void workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags);

static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req);

static uint32_t workq_constrained_allowance(struct workqueue *wq,
    thread_qos_t at_qos, struct uthread *uth, bool may_start_timer);

static bool _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq);

/* Used by _wq_thactive_aggregate_downto_qos() to decide whether a bucket's
 * last-blocked thread should count as "busy". */
static bool workq_thread_is_busy(uint64_t cur_ts,
    _Atomic uint64_t *lastblocked_tsp);

/* Shared handler for the WORKQ_SYSCTL_USECS() tunables declared below. */
static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS;

static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags);

static inline void
workq_lock_spin(struct workqueue *wq);

static inline void
workq_unlock(struct workqueue *wq);
109*19c3b8c2SApple OSS Distributions
#pragma mark globals

/*
 * Pairs a tunable expressed in microseconds with its cached conversion to
 * mach absolute time; both fields are kept in sync by
 * workq_sysctl_handle_usecs().
 */
struct workq_usec_var {
	uint32_t usecs;     /* value as set via sysctl, in microseconds */
	uint64_t abstime;   /* same interval converted to absolute time units */
};

/*
 * Declares a workq_usec_var named `var` (initialized to `init` usecs) plus
 * a read-write "kern.<var>_usecs" sysctl that funnels updates through
 * workq_sysctl_handle_usecs() so the abstime cache stays current.
 */
#define WORKQ_SYSCTL_USECS(var, init) \
	static struct workq_usec_var var = { .usecs = init }; \
	SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
	    workq_sysctl_handle_usecs, "I", "")
122*19c3b8c2SApple OSS Distributions
static LCK_GRP_DECLARE(workq_lck_grp, "workq");
os_refgrp_decl(static, workq_refgrp, "workq", NULL);

/* Allocation zones for workqueue structures and thread requests. */
static ZONE_DEFINE(workq_zone_workqueue, "workq.wq",
    sizeof(struct workqueue), ZC_NONE);
static ZONE_DEFINE(workq_zone_threadreq, "workq.threadreq",
    sizeof(struct workq_threadreq_s), ZC_CACHING);

/* NOTE(review): name suggests workqueue teardown is deferred to an MPSC
 * daemon queue — confirm where this queue is drained. */
static struct mpsc_daemon_queue workq_deallocate_queue;

/* Timing tunables (see struct workq_usec_var / the shared sysctl handler). */
WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS);
static uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
static uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8;
static uint32_t wq_init_constrained_limit = 1;
static uint16_t wq_death_max_load;
static uint32_t wq_max_parallelism[WORKQ_NUM_QOS_BUCKETS];

/*
 * This is not a hard limit but the max size we want to aim to hit across the
 * entire cooperative pool. We can oversubscribe the pool due to non-cooperative
 * workers and the max we will oversubscribe the pool by, is a total of
 * wq_max_cooperative_threads * WORKQ_NUM_QOS_BUCKETS.
 */
static uint32_t wq_max_cooperative_threads;
149*19c3b8c2SApple OSS Distributions
150*19c3b8c2SApple OSS Distributions static inline uint32_t
wq_cooperative_queue_max_size(struct workqueue * wq)151*19c3b8c2SApple OSS Distributions wq_cooperative_queue_max_size(struct workqueue *wq)
152*19c3b8c2SApple OSS Distributions {
153*19c3b8c2SApple OSS Distributions return wq->wq_cooperative_queue_has_limited_max_size ? 1 : wq_max_cooperative_threads;
154*19c3b8c2SApple OSS Distributions }
155*19c3b8c2SApple OSS Distributions
156*19c3b8c2SApple OSS Distributions #pragma mark sysctls
157*19c3b8c2SApple OSS Distributions
158*19c3b8c2SApple OSS Distributions static int
159*19c3b8c2SApple OSS Distributions workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS
160*19c3b8c2SApple OSS Distributions {
161*19c3b8c2SApple OSS Distributions #pragma unused(arg2)
162*19c3b8c2SApple OSS Distributions struct workq_usec_var *v = arg1;
163*19c3b8c2SApple OSS Distributions int error = sysctl_handle_int(oidp, &v->usecs, 0, req);
164*19c3b8c2SApple OSS Distributions if (error || !req->newptr) {
165*19c3b8c2SApple OSS Distributions return error;
166*19c3b8c2SApple OSS Distributions }
167*19c3b8c2SApple OSS Distributions clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC,
168*19c3b8c2SApple OSS Distributions &v->abstime);
169*19c3b8c2SApple OSS Distributions return 0;
170*19c3b8c2SApple OSS Distributions }
171*19c3b8c2SApple OSS Distributions
/* Plain integer tunables for the overall and constrained thread caps. */
SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_threads, 0, "");

SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_constrained_threads, 0, "");
177*19c3b8c2SApple OSS Distributions
178*19c3b8c2SApple OSS Distributions static int
179*19c3b8c2SApple OSS Distributions wq_limit_cooperative_threads_for_proc SYSCTL_HANDLER_ARGS
180*19c3b8c2SApple OSS Distributions {
181*19c3b8c2SApple OSS Distributions #pragma unused(arg1, arg2, oidp)
182*19c3b8c2SApple OSS Distributions int input_pool_size = 0;
183*19c3b8c2SApple OSS Distributions int changed;
184*19c3b8c2SApple OSS Distributions int error = 0;
185*19c3b8c2SApple OSS Distributions
186*19c3b8c2SApple OSS Distributions error = sysctl_io_number(req, 0, sizeof(int), &input_pool_size, &changed);
187*19c3b8c2SApple OSS Distributions if (error || !changed) {
188*19c3b8c2SApple OSS Distributions return error;
189*19c3b8c2SApple OSS Distributions }
190*19c3b8c2SApple OSS Distributions
191*19c3b8c2SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_DEFAULT 0
192*19c3b8c2SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS -1
193*19c3b8c2SApple OSS Distributions /* Not available currently, but sysctl interface is designed to allow these
194*19c3b8c2SApple OSS Distributions * extra parameters:
195*19c3b8c2SApple OSS Distributions * WQ_COOPERATIVE_POOL_SIZE_STRICT : -2 (across all bucket)
196*19c3b8c2SApple OSS Distributions * WQ_COOPERATIVE_POOL_SIZE_CUSTOM : [1, 512]
197*19c3b8c2SApple OSS Distributions */
198*19c3b8c2SApple OSS Distributions
199*19c3b8c2SApple OSS Distributions if (input_pool_size != WQ_COOPERATIVE_POOL_SIZE_DEFAULT
200*19c3b8c2SApple OSS Distributions && input_pool_size != WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS) {
201*19c3b8c2SApple OSS Distributions error = EINVAL;
202*19c3b8c2SApple OSS Distributions goto out;
203*19c3b8c2SApple OSS Distributions }
204*19c3b8c2SApple OSS Distributions
205*19c3b8c2SApple OSS Distributions proc_t p = req->p;
206*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
207*19c3b8c2SApple OSS Distributions
208*19c3b8c2SApple OSS Distributions if (wq != NULL) {
209*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
210*19c3b8c2SApple OSS Distributions if (wq->wq_reqcount > 0 || wq->wq_nthreads > 0) {
211*19c3b8c2SApple OSS Distributions // Hackily enforce that the workqueue is still new (no requests or
212*19c3b8c2SApple OSS Distributions // threads)
213*19c3b8c2SApple OSS Distributions error = ENOTSUP;
214*19c3b8c2SApple OSS Distributions } else {
215*19c3b8c2SApple OSS Distributions wq->wq_cooperative_queue_has_limited_max_size = (input_pool_size == WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS);
216*19c3b8c2SApple OSS Distributions }
217*19c3b8c2SApple OSS Distributions workq_unlock(wq);
218*19c3b8c2SApple OSS Distributions } else {
219*19c3b8c2SApple OSS Distributions /* This process has no workqueue, calling this syctl makes no sense */
220*19c3b8c2SApple OSS Distributions return ENOTSUP;
221*19c3b8c2SApple OSS Distributions }
222*19c3b8c2SApple OSS Distributions
223*19c3b8c2SApple OSS Distributions out:
224*19c3b8c2SApple OSS Distributions return error;
225*19c3b8c2SApple OSS Distributions }
226*19c3b8c2SApple OSS Distributions
/* Write-only, per-calling-process knob (CTLFLAG_ANYBODY | CTLFLAG_MASKED). */
SYSCTL_PROC(_kern, OID_AUTO, wq_limit_cooperative_threads,
    CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_WR | CTLFLAG_LOCKED | CTLTYPE_INT, 0, 0,
    wq_limit_cooperative_threads_for_proc,
    "I", "Modify the max pool size of the cooperative pool");

#pragma mark p_wqptr

/* Sentinel stored in p->p_wqptr while workqueue initialization is in flight. */
#define WQPTR_IS_INITING_VALUE ((struct workqueue *)~(uintptr_t)0)
235*19c3b8c2SApple OSS Distributions
236*19c3b8c2SApple OSS Distributions static struct workqueue *
proc_get_wqptr_fast(struct proc * p)237*19c3b8c2SApple OSS Distributions proc_get_wqptr_fast(struct proc *p)
238*19c3b8c2SApple OSS Distributions {
239*19c3b8c2SApple OSS Distributions return os_atomic_load(&p->p_wqptr, relaxed);
240*19c3b8c2SApple OSS Distributions }
241*19c3b8c2SApple OSS Distributions
242*19c3b8c2SApple OSS Distributions struct workqueue *
proc_get_wqptr(struct proc * p)243*19c3b8c2SApple OSS Distributions proc_get_wqptr(struct proc *p)
244*19c3b8c2SApple OSS Distributions {
245*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
246*19c3b8c2SApple OSS Distributions return wq == WQPTR_IS_INITING_VALUE ? NULL : wq;
247*19c3b8c2SApple OSS Distributions }
248*19c3b8c2SApple OSS Distributions
/*
 * Publish the workqueue pointer for a process.
 *
 * The release ordering on the exchange makes the workqueue's initialized
 * state visible before the pointer itself.  If the previous value was the
 * WQPTR_IS_INITING_VALUE sentinel, threads may be parked on &p->p_wqptr in
 * proc_init_wqptr_or_wait(); take the proc lock (to serialize with their
 * assert_wait) and wake them.
 */
static void
proc_set_wqptr(struct proc *p, struct workqueue *wq)
{
	wq = os_atomic_xchg(&p->p_wqptr, wq, release);
	if (wq == WQPTR_IS_INITING_VALUE) {
		proc_lock(p);
		thread_wakeup(&p->p_wqptr);
		proc_unlock(p);
	}
}
259*19c3b8c2SApple OSS Distributions
/*
 * Claim the right to initialize the process's workqueue, or wait for a
 * concurrent initializer.
 *
 * Returns true when the caller won the race and must create the workqueue
 * (p_wqptr now holds the WQPTR_IS_INITING_VALUE sentinel); returns false
 * when a workqueue already exists or another thread was initializing (in
 * which case this blocks until proc_set_wqptr() publishes the pointer).
 * The proc lock serializes the load/store of the sentinel against the
 * wakeup in proc_set_wqptr().
 */
static bool
proc_init_wqptr_or_wait(struct proc *p)
{
	struct workqueue *wq;

	proc_lock(p);
	wq = os_atomic_load(&p->p_wqptr, relaxed);

	if (wq == NULL) {
		/* No workqueue and no initializer: claim the slot. */
		os_atomic_store(&p->p_wqptr, WQPTR_IS_INITING_VALUE, relaxed);
		proc_unlock(p);
		return true;
	}

	if (wq == WQPTR_IS_INITING_VALUE) {
		/* Someone else is initializing: sleep until they publish. */
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		proc_unlock(p);
	}
	return false;
}
283*19c3b8c2SApple OSS Distributions
/*
 * Wait event a parked workqueue thread sleeps on.  The address of the
 * thread's uu_workq_stackaddr field is used purely as a unique per-thread
 * event token.
 */
static inline event_t
workq_parked_wait_event(struct uthread *uth)
{
	return (event_t)&uth->uu_workq_stackaddr;
}
289*19c3b8c2SApple OSS Distributions
/*
 * Wake exactly the given parked workqueue thread (thread_wakeup_thread
 * targets one specific thread on the per-thread park event).
 */
static inline void
workq_thread_wakeup(struct uthread *uth)
{
	thread_wakeup_thread(workq_parked_wait_event(uth), get_machthread(uth));
}
295*19c3b8c2SApple OSS Distributions
#pragma mark wq_thactive

#if defined(__LP64__)
// Layout is:
//   127 - 115 : 13 bits of zeroes
//   114 - 112 : best QoS among all pending constrained requests
//   111 - 0   : MGR, AUI, UI, IN, DF, UT, BG+MT buckets every 16 bits
#define WQ_THACTIVE_BUCKET_WIDTH 16
#define WQ_THACTIVE_QOS_SHIFT    (7 * WQ_THACTIVE_BUCKET_WIDTH)
#else
// Layout is:
//   63 - 61 : best QoS among all pending constrained requests
//   60      : Manager bucket (0 or 1)
//   59 - 0  : AUI, UI, IN, DF, UT, BG+MT buckets every 10 bits
#define WQ_THACTIVE_BUCKET_WIDTH 10
#define WQ_THACTIVE_QOS_SHIFT    (6 * WQ_THACTIVE_BUCKET_WIDTH + 1)
#endif
// Mask for a single per-bucket counter field, and its half-range value.
#define WQ_THACTIVE_BUCKET_MASK  ((1U << WQ_THACTIVE_BUCKET_WIDTH) - 1)
#define WQ_THACTIVE_BUCKET_HALF  (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))

// The 3-bit constrained-request QoS must fit above the topmost bucket.
static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
    "Make sure we have space to encode a QoS");
318*19c3b8c2SApple OSS Distributions
/* Relaxed snapshot of the packed per-QoS active-thread counter word. */
static inline wq_thactive_t
_wq_thactive(struct workqueue *wq)
{
	return os_atomic_load_wide(&wq->wq_thactive, relaxed);
}
324*19c3b8c2SApple OSS Distributions
325*19c3b8c2SApple OSS Distributions static inline uint8_t
_wq_bucket(thread_qos_t qos)326*19c3b8c2SApple OSS Distributions _wq_bucket(thread_qos_t qos)
327*19c3b8c2SApple OSS Distributions {
328*19c3b8c2SApple OSS Distributions // Map both BG and MT to the same bucket by over-shifting down and
329*19c3b8c2SApple OSS Distributions // clamping MT and BG together.
330*19c3b8c2SApple OSS Distributions switch (qos) {
331*19c3b8c2SApple OSS Distributions case THREAD_QOS_MAINTENANCE:
332*19c3b8c2SApple OSS Distributions return 0;
333*19c3b8c2SApple OSS Distributions default:
334*19c3b8c2SApple OSS Distributions return qos - 2;
335*19c3b8c2SApple OSS Distributions }
336*19c3b8c2SApple OSS Distributions }
337*19c3b8c2SApple OSS Distributions
/* Extract the best pending constrained-request QoS from a thactive value. */
#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
	((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))

/*
 * Read the best constrained-request QoS bits with a plain (non-atomic)
 * load; see the in-body comment for why this is safe.
 */
static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
{
	// Avoid expensive atomic operations: the three bits we're loading are in
	// a single byte, and always updated under the workqueue lock
	wq_thactive_t v = *(wq_thactive_t *)&wq->wq_thactive;
	return WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(v);
}
349*19c3b8c2SApple OSS Distributions
/*
 * Re-derive the best constrained-request QoS from the head of the
 * constrained priority queue and, if it changed, fold the signed delta
 * into the QoS bits of wq_thactive with a single atomic add.  Updates to
 * those bits are serialized under the workqueue lock, so the add cannot
 * race with another QoS update.
 */
static void
_wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq)
{
	thread_qos_t old_qos, new_qos;
	workq_threadreq_t req;

	/* Highest-priority pending constrained request, if any. */
	req = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);
	new_qos = req ? req->tr_qos : THREAD_QOS_UNSPECIFIED;
	old_qos = _wq_thactive_best_constrained_req_qos(wq);
	if (old_qos != new_qos) {
		/* Signed difference, shifted into the QoS field. */
		long delta = (long)new_qos - (long)old_qos;
		wq_thactive_t v = (wq_thactive_t)delta << WQ_THACTIVE_QOS_SHIFT;
		/*
		 * We can do an atomic add relative to the initial load because updates
		 * to this qos are always serialized under the workqueue lock.
		 */
		v = os_atomic_add(&wq->wq_thactive, v, relaxed);
#ifdef __LP64__
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v,
		    (uint64_t)(v >> 64), 0);
#else
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0);
#endif
	}
}
376*19c3b8c2SApple OSS Distributions
377*19c3b8c2SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_offset_for_qos(thread_qos_t qos)378*19c3b8c2SApple OSS Distributions _wq_thactive_offset_for_qos(thread_qos_t qos)
379*19c3b8c2SApple OSS Distributions {
380*19c3b8c2SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
381*19c3b8c2SApple OSS Distributions __builtin_assume(bucket < WORKQ_NUM_BUCKETS);
382*19c3b8c2SApple OSS Distributions return (wq_thactive_t)1 << (bucket * WQ_THACTIVE_BUCKET_WIDTH);
383*19c3b8c2SApple OSS Distributions }
384*19c3b8c2SApple OSS Distributions
385*19c3b8c2SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_inc(struct workqueue * wq,thread_qos_t qos)386*19c3b8c2SApple OSS Distributions _wq_thactive_inc(struct workqueue *wq, thread_qos_t qos)
387*19c3b8c2SApple OSS Distributions {
388*19c3b8c2SApple OSS Distributions wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
389*19c3b8c2SApple OSS Distributions return os_atomic_add_orig(&wq->wq_thactive, v, relaxed);
390*19c3b8c2SApple OSS Distributions }
391*19c3b8c2SApple OSS Distributions
392*19c3b8c2SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_dec(struct workqueue * wq,thread_qos_t qos)393*19c3b8c2SApple OSS Distributions _wq_thactive_dec(struct workqueue *wq, thread_qos_t qos)
394*19c3b8c2SApple OSS Distributions {
395*19c3b8c2SApple OSS Distributions wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
396*19c3b8c2SApple OSS Distributions return os_atomic_sub_orig(&wq->wq_thactive, v, relaxed);
397*19c3b8c2SApple OSS Distributions }
398*19c3b8c2SApple OSS Distributions
/*
 * Move one scheduled thread from old_qos's bucket to new_qos's: a single
 * atomic add of the (possibly negative) field difference updates both
 * packed counters at once, then the per-bucket scheduled counts are
 * adjusted.  NOTE(review): the non-atomic wq_thscheduled_count updates
 * appear to rely on the workqueue lock being held — confirm at call sites.
 */
static inline void
_wq_thactive_move(struct workqueue *wq,
    thread_qos_t old_qos, thread_qos_t new_qos)
{
	wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) -
	    _wq_thactive_offset_for_qos(old_qos);
	os_atomic_add(&wq->wq_thactive, v, relaxed);
	wq->wq_thscheduled_count[_wq_bucket(old_qos)]--;
	wq->wq_thscheduled_count[_wq_bucket(new_qos)]++;
}
409*19c3b8c2SApple OSS Distributions
/*
 * Sum the active-thread counts of all buckets at QoS `qos` and above,
 * using snapshot `v` of wq_thactive.
 *
 * If `busycount` is non-NULL, additionally count buckets that have more
 * scheduled threads than active ones and whose last-blocked timestamp is
 * recent (workq_thread_is_busy) — i.e. buckets likely to become runnable
 * again soon.  If `max_busycount` is non-NULL, it receives an upper bound
 * on *busycount (THREAD_QOS_LAST - qos).
 *
 * Returns the aggregated active count.
 */
static inline uint32_t
_wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
    thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
{
	uint32_t count = 0, active;
	uint64_t curtime;

	assert(WORKQ_THREAD_QOS_MIN <= qos && qos <= WORKQ_THREAD_QOS_MAX);

	if (busycount) {
		/* Sample the clock once for all busy checks below. */
		curtime = mach_absolute_time();
		*busycount = 0;
	}
	if (max_busycount) {
		*max_busycount = THREAD_QOS_LAST - qos;
	}

	/* Shift the snapshot so bucket `i` sits in the low bits each pass. */
	uint8_t i = _wq_bucket(qos);
	v >>= i * WQ_THACTIVE_BUCKET_WIDTH;
	for (; i < WORKQ_NUM_QOS_BUCKETS; i++, v >>= WQ_THACTIVE_BUCKET_WIDTH) {
		active = v & WQ_THACTIVE_BUCKET_MASK;
		count += active;

		if (busycount && wq->wq_thscheduled_count[i] > active) {
			if (workq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i])) {
				/*
				 * We only consider the last blocked thread for a given bucket
				 * as busy because we don't want to take the list lock in each
				 * sched callback. However this is an approximation that could
				 * contribute to thread creation storms.
				 */
				(*busycount)++;
			}
		}
	}

	return count;
}
448*19c3b8c2SApple OSS Distributions
/* The input qos here should be the requested QoS of the thread, not accounting
 * for any overrides */
static inline void
_wq_cooperative_queue_scheduled_count_dec(struct workqueue *wq, thread_qos_t qos)
{
	/* Postfix decrement: old_scheduled_count is the pre-decrement value,
	 * so the assert catches underflow of the per-bucket count. */
	__assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]--;
	assert(old_scheduled_count > 0);
}
457*19c3b8c2SApple OSS Distributions
/* The input qos here should be the requested QoS of the thread, not accounting
 * for any overrides */
static inline void
_wq_cooperative_queue_scheduled_count_inc(struct workqueue *wq, thread_qos_t qos)
{
	/* Postfix increment: old_scheduled_count is the pre-increment value,
	 * so the assert catches overflow of the uint8_t per-bucket count. */
	__assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]++;
	assert(old_scheduled_count < UINT8_MAX);
}
466*19c3b8c2SApple OSS Distributions
#pragma mark wq_flags

/* Lock-free snapshot of the workqueue flag word. */
static inline uint32_t
_wq_flags(struct workqueue *wq)
{
	return os_atomic_load(&wq->wq_flags, relaxed);
}
474*19c3b8c2SApple OSS Distributions
475*19c3b8c2SApple OSS Distributions static inline bool
_wq_exiting(struct workqueue * wq)476*19c3b8c2SApple OSS Distributions _wq_exiting(struct workqueue *wq)
477*19c3b8c2SApple OSS Distributions {
478*19c3b8c2SApple OSS Distributions return _wq_flags(wq) & WQ_EXITING;
479*19c3b8c2SApple OSS Distributions }
480*19c3b8c2SApple OSS Distributions
481*19c3b8c2SApple OSS Distributions bool
workq_is_exiting(struct proc * p)482*19c3b8c2SApple OSS Distributions workq_is_exiting(struct proc *p)
483*19c3b8c2SApple OSS Distributions {
484*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
485*19c3b8c2SApple OSS Distributions return !wq || _wq_exiting(wq);
486*19c3b8c2SApple OSS Distributions }
487*19c3b8c2SApple OSS Distributions
488*19c3b8c2SApple OSS Distributions
489*19c3b8c2SApple OSS Distributions #pragma mark workqueue lock
490*19c3b8c2SApple OSS Distributions
491*19c3b8c2SApple OSS Distributions static bool
workq_lock_is_acquired_kdp(struct workqueue * wq)492*19c3b8c2SApple OSS Distributions workq_lock_is_acquired_kdp(struct workqueue *wq)
493*19c3b8c2SApple OSS Distributions {
494*19c3b8c2SApple OSS Distributions return kdp_lck_ticket_is_acquired(&wq->wq_lock);
495*19c3b8c2SApple OSS Distributions }
496*19c3b8c2SApple OSS Distributions
497*19c3b8c2SApple OSS Distributions static inline void
workq_lock_spin(struct workqueue * wq)498*19c3b8c2SApple OSS Distributions workq_lock_spin(struct workqueue *wq)
499*19c3b8c2SApple OSS Distributions {
500*19c3b8c2SApple OSS Distributions lck_ticket_lock(&wq->wq_lock, &workq_lck_grp);
501*19c3b8c2SApple OSS Distributions }
502*19c3b8c2SApple OSS Distributions
503*19c3b8c2SApple OSS Distributions static inline void
workq_lock_held(struct workqueue * wq)504*19c3b8c2SApple OSS Distributions workq_lock_held(struct workqueue *wq)
505*19c3b8c2SApple OSS Distributions {
506*19c3b8c2SApple OSS Distributions LCK_TICKET_ASSERT_OWNED(&wq->wq_lock);
507*19c3b8c2SApple OSS Distributions }
508*19c3b8c2SApple OSS Distributions
509*19c3b8c2SApple OSS Distributions static inline bool
workq_lock_try(struct workqueue * wq)510*19c3b8c2SApple OSS Distributions workq_lock_try(struct workqueue *wq)
511*19c3b8c2SApple OSS Distributions {
512*19c3b8c2SApple OSS Distributions return lck_ticket_lock_try(&wq->wq_lock, &workq_lck_grp);
513*19c3b8c2SApple OSS Distributions }
514*19c3b8c2SApple OSS Distributions
515*19c3b8c2SApple OSS Distributions static inline void
workq_unlock(struct workqueue * wq)516*19c3b8c2SApple OSS Distributions workq_unlock(struct workqueue *wq)
517*19c3b8c2SApple OSS Distributions {
518*19c3b8c2SApple OSS Distributions lck_ticket_unlock(&wq->wq_lock);
519*19c3b8c2SApple OSS Distributions }
520*19c3b8c2SApple OSS Distributions
521*19c3b8c2SApple OSS Distributions #pragma mark idle thread lists
522*19c3b8c2SApple OSS Distributions
523*19c3b8c2SApple OSS Distributions #define WORKQ_POLICY_INIT(qos) \
524*19c3b8c2SApple OSS Distributions (struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }
525*19c3b8c2SApple OSS Distributions
526*19c3b8c2SApple OSS Distributions static inline thread_qos_t
workq_pri_bucket(struct uu_workq_policy req)527*19c3b8c2SApple OSS Distributions workq_pri_bucket(struct uu_workq_policy req)
528*19c3b8c2SApple OSS Distributions {
529*19c3b8c2SApple OSS Distributions return MAX(MAX(req.qos_req, req.qos_max), req.qos_override);
530*19c3b8c2SApple OSS Distributions }
531*19c3b8c2SApple OSS Distributions
532*19c3b8c2SApple OSS Distributions static inline thread_qos_t
workq_pri_override(struct uu_workq_policy req)533*19c3b8c2SApple OSS Distributions workq_pri_override(struct uu_workq_policy req)
534*19c3b8c2SApple OSS Distributions {
535*19c3b8c2SApple OSS Distributions return MAX(workq_pri_bucket(req), req.qos_bucket);
536*19c3b8c2SApple OSS Distributions }
537*19c3b8c2SApple OSS Distributions
538*19c3b8c2SApple OSS Distributions static inline bool
workq_thread_needs_params_change(workq_threadreq_t req,struct uthread * uth)539*19c3b8c2SApple OSS Distributions workq_thread_needs_params_change(workq_threadreq_t req, struct uthread *uth)
540*19c3b8c2SApple OSS Distributions {
541*19c3b8c2SApple OSS Distributions workq_threadreq_param_t cur_trp, req_trp = { };
542*19c3b8c2SApple OSS Distributions
543*19c3b8c2SApple OSS Distributions cur_trp.trp_value = uth->uu_save.uus_workq_park_data.workloop_params;
544*19c3b8c2SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
545*19c3b8c2SApple OSS Distributions req_trp = kqueue_threadreq_workloop_param(req);
546*19c3b8c2SApple OSS Distributions }
547*19c3b8c2SApple OSS Distributions
548*19c3b8c2SApple OSS Distributions /*
549*19c3b8c2SApple OSS Distributions * CPU percent flags are handled separately to policy changes, so ignore
550*19c3b8c2SApple OSS Distributions * them for all of these checks.
551*19c3b8c2SApple OSS Distributions */
552*19c3b8c2SApple OSS Distributions uint16_t cur_flags = (cur_trp.trp_flags & ~TRP_CPUPERCENT);
553*19c3b8c2SApple OSS Distributions uint16_t req_flags = (req_trp.trp_flags & ~TRP_CPUPERCENT);
554*19c3b8c2SApple OSS Distributions
555*19c3b8c2SApple OSS Distributions if (!req_flags && !cur_flags) {
556*19c3b8c2SApple OSS Distributions return false;
557*19c3b8c2SApple OSS Distributions }
558*19c3b8c2SApple OSS Distributions
559*19c3b8c2SApple OSS Distributions if (req_flags != cur_flags) {
560*19c3b8c2SApple OSS Distributions return true;
561*19c3b8c2SApple OSS Distributions }
562*19c3b8c2SApple OSS Distributions
563*19c3b8c2SApple OSS Distributions if ((req_flags & TRP_PRIORITY) && req_trp.trp_pri != cur_trp.trp_pri) {
564*19c3b8c2SApple OSS Distributions return true;
565*19c3b8c2SApple OSS Distributions }
566*19c3b8c2SApple OSS Distributions
567*19c3b8c2SApple OSS Distributions if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
568*19c3b8c2SApple OSS Distributions return true;
569*19c3b8c2SApple OSS Distributions }
570*19c3b8c2SApple OSS Distributions
571*19c3b8c2SApple OSS Distributions return false;
572*19c3b8c2SApple OSS Distributions }
573*19c3b8c2SApple OSS Distributions
574*19c3b8c2SApple OSS Distributions static inline bool
workq_thread_needs_priority_change(workq_threadreq_t req,struct uthread * uth)575*19c3b8c2SApple OSS Distributions workq_thread_needs_priority_change(workq_threadreq_t req, struct uthread *uth)
576*19c3b8c2SApple OSS Distributions {
577*19c3b8c2SApple OSS Distributions if (workq_thread_needs_params_change(req, uth)) {
578*19c3b8c2SApple OSS Distributions return true;
579*19c3b8c2SApple OSS Distributions }
580*19c3b8c2SApple OSS Distributions
581*19c3b8c2SApple OSS Distributions if (req->tr_qos != workq_pri_override(uth->uu_workq_pri)) {
582*19c3b8c2SApple OSS Distributions return true;
583*19c3b8c2SApple OSS Distributions }
584*19c3b8c2SApple OSS Distributions
585*19c3b8c2SApple OSS Distributions #if CONFIG_PREADOPT_TG
586*19c3b8c2SApple OSS Distributions thread_group_qos_t tg = kqr_preadopt_thread_group(req);
587*19c3b8c2SApple OSS Distributions if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
588*19c3b8c2SApple OSS Distributions /*
589*19c3b8c2SApple OSS Distributions * Ideally, we'd add check here to see if thread's preadopt TG is same
590*19c3b8c2SApple OSS Distributions * as the thread requests's thread group and short circuit if that is
591*19c3b8c2SApple OSS Distributions * the case. But in the interest of keeping the code clean and not
592*19c3b8c2SApple OSS Distributions * taking the thread lock here, we're going to skip this. We will
593*19c3b8c2SApple OSS Distributions * eventually shortcircuit once we try to set the preadoption thread
594*19c3b8c2SApple OSS Distributions * group on the thread.
595*19c3b8c2SApple OSS Distributions */
596*19c3b8c2SApple OSS Distributions return true;
597*19c3b8c2SApple OSS Distributions }
598*19c3b8c2SApple OSS Distributions #endif
599*19c3b8c2SApple OSS Distributions
600*19c3b8c2SApple OSS Distributions return false;
601*19c3b8c2SApple OSS Distributions }
602*19c3b8c2SApple OSS Distributions
603*19c3b8c2SApple OSS Distributions /* Input thread must be self. Called during self override, resetting overrides
604*19c3b8c2SApple OSS Distributions * or while processing kevents
605*19c3b8c2SApple OSS Distributions *
606*19c3b8c2SApple OSS Distributions * Called with workq lock held. Sometimes also the thread mutex
607*19c3b8c2SApple OSS Distributions */
608*19c3b8c2SApple OSS Distributions static void
workq_thread_update_bucket(proc_t p,struct workqueue * wq,struct uthread * uth,struct uu_workq_policy old_pri,struct uu_workq_policy new_pri,bool force_run)609*19c3b8c2SApple OSS Distributions workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth,
610*19c3b8c2SApple OSS Distributions struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
611*19c3b8c2SApple OSS Distributions bool force_run)
612*19c3b8c2SApple OSS Distributions {
613*19c3b8c2SApple OSS Distributions assert(uth == current_uthread());
614*19c3b8c2SApple OSS Distributions
615*19c3b8c2SApple OSS Distributions thread_qos_t old_bucket = old_pri.qos_bucket;
616*19c3b8c2SApple OSS Distributions thread_qos_t new_bucket = workq_pri_bucket(new_pri);
617*19c3b8c2SApple OSS Distributions
618*19c3b8c2SApple OSS Distributions if (old_bucket != new_bucket) {
619*19c3b8c2SApple OSS Distributions _wq_thactive_move(wq, old_bucket, new_bucket);
620*19c3b8c2SApple OSS Distributions }
621*19c3b8c2SApple OSS Distributions
622*19c3b8c2SApple OSS Distributions new_pri.qos_bucket = new_bucket;
623*19c3b8c2SApple OSS Distributions uth->uu_workq_pri = new_pri;
624*19c3b8c2SApple OSS Distributions
625*19c3b8c2SApple OSS Distributions if (old_pri.qos_override != new_pri.qos_override) {
626*19c3b8c2SApple OSS Distributions thread_set_workq_override(get_machthread(uth), new_pri.qos_override);
627*19c3b8c2SApple OSS Distributions }
628*19c3b8c2SApple OSS Distributions
629*19c3b8c2SApple OSS Distributions if (wq->wq_reqcount && (old_bucket > new_bucket || force_run)) {
630*19c3b8c2SApple OSS Distributions int flags = WORKQ_THREADREQ_CAN_CREATE_THREADS;
631*19c3b8c2SApple OSS Distributions if (old_bucket > new_bucket) {
632*19c3b8c2SApple OSS Distributions /*
633*19c3b8c2SApple OSS Distributions * When lowering our bucket, we may unblock a thread request,
634*19c3b8c2SApple OSS Distributions * but we can't drop our priority before we have evaluated
635*19c3b8c2SApple OSS Distributions * whether this is the case, and if we ever drop the workqueue lock
636*19c3b8c2SApple OSS Distributions * that would cause a priority inversion.
637*19c3b8c2SApple OSS Distributions *
638*19c3b8c2SApple OSS Distributions * We hence have to disallow thread creation in that case.
639*19c3b8c2SApple OSS Distributions */
640*19c3b8c2SApple OSS Distributions flags = 0;
641*19c3b8c2SApple OSS Distributions }
642*19c3b8c2SApple OSS Distributions workq_schedule_creator(p, wq, flags);
643*19c3b8c2SApple OSS Distributions }
644*19c3b8c2SApple OSS Distributions }
645*19c3b8c2SApple OSS Distributions
646*19c3b8c2SApple OSS Distributions /*
647*19c3b8c2SApple OSS Distributions * Sets/resets the cpu percent limits on the current thread. We can't set
648*19c3b8c2SApple OSS Distributions * these limits from outside of the current thread, so this function needs
649*19c3b8c2SApple OSS Distributions * to be called when we're executing on the intended
650*19c3b8c2SApple OSS Distributions */
651*19c3b8c2SApple OSS Distributions static void
workq_thread_reset_cpupercent(workq_threadreq_t req,struct uthread * uth)652*19c3b8c2SApple OSS Distributions workq_thread_reset_cpupercent(workq_threadreq_t req, struct uthread *uth)
653*19c3b8c2SApple OSS Distributions {
654*19c3b8c2SApple OSS Distributions assert(uth == current_uthread());
655*19c3b8c2SApple OSS Distributions workq_threadreq_param_t trp = { };
656*19c3b8c2SApple OSS Distributions
657*19c3b8c2SApple OSS Distributions if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
658*19c3b8c2SApple OSS Distributions trp = kqueue_threadreq_workloop_param(req);
659*19c3b8c2SApple OSS Distributions }
660*19c3b8c2SApple OSS Distributions
661*19c3b8c2SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_CPUPERCENT) {
662*19c3b8c2SApple OSS Distributions /*
663*19c3b8c2SApple OSS Distributions * Going through disable when we have an existing CPU percent limit
664*19c3b8c2SApple OSS Distributions * set will force the ledger to refill the token bucket of the current
665*19c3b8c2SApple OSS Distributions * thread. Removing any penalty applied by previous thread use.
666*19c3b8c2SApple OSS Distributions */
667*19c3b8c2SApple OSS Distributions thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
668*19c3b8c2SApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_CPUPERCENT;
669*19c3b8c2SApple OSS Distributions }
670*19c3b8c2SApple OSS Distributions
671*19c3b8c2SApple OSS Distributions if (trp.trp_flags & TRP_CPUPERCENT) {
672*19c3b8c2SApple OSS Distributions thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent,
673*19c3b8c2SApple OSS Distributions (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
674*19c3b8c2SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT;
675*19c3b8c2SApple OSS Distributions }
676*19c3b8c2SApple OSS Distributions }
677*19c3b8c2SApple OSS Distributions
678*19c3b8c2SApple OSS Distributions /* Called with the workq lock held */
679*19c3b8c2SApple OSS Distributions static void
workq_thread_reset_pri(struct workqueue * wq,struct uthread * uth,workq_threadreq_t req,bool unpark)680*19c3b8c2SApple OSS Distributions workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth,
681*19c3b8c2SApple OSS Distributions workq_threadreq_t req, bool unpark)
682*19c3b8c2SApple OSS Distributions {
683*19c3b8c2SApple OSS Distributions thread_t th = get_machthread(uth);
684*19c3b8c2SApple OSS Distributions thread_qos_t qos = req ? req->tr_qos : WORKQ_THREAD_QOS_CLEANUP;
685*19c3b8c2SApple OSS Distributions workq_threadreq_param_t trp = { };
686*19c3b8c2SApple OSS Distributions int priority = 31;
687*19c3b8c2SApple OSS Distributions int policy = POLICY_TIMESHARE;
688*19c3b8c2SApple OSS Distributions
689*19c3b8c2SApple OSS Distributions if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
690*19c3b8c2SApple OSS Distributions trp = kqueue_threadreq_workloop_param(req);
691*19c3b8c2SApple OSS Distributions }
692*19c3b8c2SApple OSS Distributions
693*19c3b8c2SApple OSS Distributions uth->uu_workq_pri = WORKQ_POLICY_INIT(qos);
694*19c3b8c2SApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_OUTSIDE_QOS;
695*19c3b8c2SApple OSS Distributions
696*19c3b8c2SApple OSS Distributions if (unpark) {
697*19c3b8c2SApple OSS Distributions uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
698*19c3b8c2SApple OSS Distributions // qos sent out to userspace (may differ from uu_workq_pri on param threads)
699*19c3b8c2SApple OSS Distributions uth->uu_save.uus_workq_park_data.qos = qos;
700*19c3b8c2SApple OSS Distributions }
701*19c3b8c2SApple OSS Distributions
702*19c3b8c2SApple OSS Distributions if (qos == WORKQ_THREAD_QOS_MANAGER) {
703*19c3b8c2SApple OSS Distributions uint32_t mgr_pri = wq->wq_event_manager_priority;
704*19c3b8c2SApple OSS Distributions assert(trp.trp_value == 0); // manager qos and thread policy don't mix
705*19c3b8c2SApple OSS Distributions
706*19c3b8c2SApple OSS Distributions if (_pthread_priority_has_sched_pri(mgr_pri)) {
707*19c3b8c2SApple OSS Distributions mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
708*19c3b8c2SApple OSS Distributions thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri,
709*19c3b8c2SApple OSS Distributions POLICY_TIMESHARE);
710*19c3b8c2SApple OSS Distributions return;
711*19c3b8c2SApple OSS Distributions }
712*19c3b8c2SApple OSS Distributions
713*19c3b8c2SApple OSS Distributions qos = _pthread_priority_thread_qos(mgr_pri);
714*19c3b8c2SApple OSS Distributions } else {
715*19c3b8c2SApple OSS Distributions if (trp.trp_flags & TRP_PRIORITY) {
716*19c3b8c2SApple OSS Distributions qos = THREAD_QOS_UNSPECIFIED;
717*19c3b8c2SApple OSS Distributions priority = trp.trp_pri;
718*19c3b8c2SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_OUTSIDE_QOS;
719*19c3b8c2SApple OSS Distributions }
720*19c3b8c2SApple OSS Distributions
721*19c3b8c2SApple OSS Distributions if (trp.trp_flags & TRP_POLICY) {
722*19c3b8c2SApple OSS Distributions policy = trp.trp_pol;
723*19c3b8c2SApple OSS Distributions }
724*19c3b8c2SApple OSS Distributions }
725*19c3b8c2SApple OSS Distributions
726*19c3b8c2SApple OSS Distributions #if CONFIG_PREADOPT_TG
727*19c3b8c2SApple OSS Distributions if (req && (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP)) {
728*19c3b8c2SApple OSS Distributions /*
729*19c3b8c2SApple OSS Distributions * We cannot safely read and borrow the reference from the kqwl since it
730*19c3b8c2SApple OSS Distributions * can disappear from under us at any time due to the max-ing logic in
731*19c3b8c2SApple OSS Distributions * kqueue_set_preadopted_thread_group.
732*19c3b8c2SApple OSS Distributions *
733*19c3b8c2SApple OSS Distributions * As such, we do the following dance:
734*19c3b8c2SApple OSS Distributions *
735*19c3b8c2SApple OSS Distributions * 1) cmpxchng and steal the kqwl's preadopt thread group and leave
736*19c3b8c2SApple OSS Distributions * behind with (NULL + QoS). At this point, we have the reference
737*19c3b8c2SApple OSS Distributions * to the thread group from the kqwl.
738*19c3b8c2SApple OSS Distributions * 2) Have the thread set the preadoption thread group on itself.
739*19c3b8c2SApple OSS Distributions * 3) cmpxchng from (NULL + QoS) which we set earlier in (1), back to
740*19c3b8c2SApple OSS Distributions * thread_group + QoS. ie we try to give the reference back to the kqwl.
741*19c3b8c2SApple OSS Distributions * If we fail, that's because a higher QoS thread group was set on the
742*19c3b8c2SApple OSS Distributions * kqwl in kqueue_set_preadopted_thread_group in which case, we need to
743*19c3b8c2SApple OSS Distributions * go back to (1).
744*19c3b8c2SApple OSS Distributions */
745*19c3b8c2SApple OSS Distributions
746*19c3b8c2SApple OSS Distributions _Atomic(struct thread_group *) * tg_loc = kqr_preadopt_thread_group_addr(req);
747*19c3b8c2SApple OSS Distributions
748*19c3b8c2SApple OSS Distributions thread_group_qos_t old_tg, new_tg;
749*19c3b8c2SApple OSS Distributions int ret = 0;
750*19c3b8c2SApple OSS Distributions again:
751*19c3b8c2SApple OSS Distributions ret = os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
752*19c3b8c2SApple OSS Distributions if (!KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) {
753*19c3b8c2SApple OSS Distributions os_atomic_rmw_loop_give_up(break);
754*19c3b8c2SApple OSS Distributions }
755*19c3b8c2SApple OSS Distributions
756*19c3b8c2SApple OSS Distributions /*
757*19c3b8c2SApple OSS Distributions * Leave the QoS behind - kqueue_set_preadopted_thread_group will
758*19c3b8c2SApple OSS Distributions * only modify it if there is a higher QoS thread group to attach
759*19c3b8c2SApple OSS Distributions */
760*19c3b8c2SApple OSS Distributions new_tg = (thread_group_qos_t) ((uintptr_t) old_tg & KQWL_PREADOPT_TG_QOS_MASK);
761*19c3b8c2SApple OSS Distributions });
762*19c3b8c2SApple OSS Distributions
763*19c3b8c2SApple OSS Distributions if (ret) {
764*19c3b8c2SApple OSS Distributions /*
765*19c3b8c2SApple OSS Distributions * We successfully took the ref from the kqwl so set it on the
766*19c3b8c2SApple OSS Distributions * thread now
767*19c3b8c2SApple OSS Distributions */
768*19c3b8c2SApple OSS Distributions thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));
769*19c3b8c2SApple OSS Distributions
770*19c3b8c2SApple OSS Distributions thread_group_qos_t thread_group_to_expect = new_tg;
771*19c3b8c2SApple OSS Distributions thread_group_qos_t thread_group_to_set = old_tg;
772*19c3b8c2SApple OSS Distributions
773*19c3b8c2SApple OSS Distributions os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
774*19c3b8c2SApple OSS Distributions if (old_tg != thread_group_to_expect) {
775*19c3b8c2SApple OSS Distributions /*
776*19c3b8c2SApple OSS Distributions * There was an intervening write to the kqwl_preadopt_tg,
777*19c3b8c2SApple OSS Distributions * and it has a higher QoS than what we are working with
778*19c3b8c2SApple OSS Distributions * here. Abandon our current adopted thread group and redo
779*19c3b8c2SApple OSS Distributions * the full dance
780*19c3b8c2SApple OSS Distributions */
781*19c3b8c2SApple OSS Distributions thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(thread_group_to_set));
782*19c3b8c2SApple OSS Distributions os_atomic_rmw_loop_give_up(goto again);
783*19c3b8c2SApple OSS Distributions }
784*19c3b8c2SApple OSS Distributions
785*19c3b8c2SApple OSS Distributions new_tg = thread_group_to_set;
786*19c3b8c2SApple OSS Distributions });
787*19c3b8c2SApple OSS Distributions } else {
788*19c3b8c2SApple OSS Distributions /* Nothing valid on the kqwl, just clear what's on the thread */
789*19c3b8c2SApple OSS Distributions thread_set_preadopt_thread_group(th, NULL);
790*19c3b8c2SApple OSS Distributions }
791*19c3b8c2SApple OSS Distributions } else {
792*19c3b8c2SApple OSS Distributions /* Not even a kqwl, clear what's on the thread */
793*19c3b8c2SApple OSS Distributions thread_set_preadopt_thread_group(th, NULL);
794*19c3b8c2SApple OSS Distributions }
795*19c3b8c2SApple OSS Distributions #endif
796*19c3b8c2SApple OSS Distributions thread_set_workq_pri(th, qos, priority, policy);
797*19c3b8c2SApple OSS Distributions }
798*19c3b8c2SApple OSS Distributions
799*19c3b8c2SApple OSS Distributions /*
800*19c3b8c2SApple OSS Distributions * Called by kevent with the NOTE_WL_THREAD_REQUEST knote lock held,
801*19c3b8c2SApple OSS Distributions * every time a servicer is being told about a new max QoS.
802*19c3b8c2SApple OSS Distributions */
803*19c3b8c2SApple OSS Distributions void
workq_thread_set_max_qos(struct proc * p,workq_threadreq_t kqr)804*19c3b8c2SApple OSS Distributions workq_thread_set_max_qos(struct proc *p, workq_threadreq_t kqr)
805*19c3b8c2SApple OSS Distributions {
806*19c3b8c2SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
807*19c3b8c2SApple OSS Distributions struct uthread *uth = current_uthread();
808*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
809*19c3b8c2SApple OSS Distributions thread_qos_t qos = kqr->tr_kq_qos_index;
810*19c3b8c2SApple OSS Distributions
811*19c3b8c2SApple OSS Distributions if (uth->uu_workq_pri.qos_max == qos) {
812*19c3b8c2SApple OSS Distributions return;
813*19c3b8c2SApple OSS Distributions }
814*19c3b8c2SApple OSS Distributions
815*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
816*19c3b8c2SApple OSS Distributions old_pri = new_pri = uth->uu_workq_pri;
817*19c3b8c2SApple OSS Distributions new_pri.qos_max = qos;
818*19c3b8c2SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
819*19c3b8c2SApple OSS Distributions workq_unlock(wq);
820*19c3b8c2SApple OSS Distributions }
821*19c3b8c2SApple OSS Distributions
822*19c3b8c2SApple OSS Distributions #pragma mark idle threads accounting and handling
823*19c3b8c2SApple OSS Distributions
824*19c3b8c2SApple OSS Distributions static inline struct uthread *
workq_oldest_killable_idle_thread(struct workqueue * wq)825*19c3b8c2SApple OSS Distributions workq_oldest_killable_idle_thread(struct workqueue *wq)
826*19c3b8c2SApple OSS Distributions {
827*19c3b8c2SApple OSS Distributions struct uthread *uth = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);
828*19c3b8c2SApple OSS Distributions
829*19c3b8c2SApple OSS Distributions if (uth && !uth->uu_save.uus_workq_park_data.has_stack) {
830*19c3b8c2SApple OSS Distributions uth = TAILQ_PREV(uth, workq_uthread_head, uu_workq_entry);
831*19c3b8c2SApple OSS Distributions if (uth) {
832*19c3b8c2SApple OSS Distributions assert(uth->uu_save.uus_workq_park_data.has_stack);
833*19c3b8c2SApple OSS Distributions }
834*19c3b8c2SApple OSS Distributions }
835*19c3b8c2SApple OSS Distributions return uth;
836*19c3b8c2SApple OSS Distributions }
837*19c3b8c2SApple OSS Distributions
838*19c3b8c2SApple OSS Distributions static inline uint64_t
workq_kill_delay_for_idle_thread(struct workqueue * wq)839*19c3b8c2SApple OSS Distributions workq_kill_delay_for_idle_thread(struct workqueue *wq)
840*19c3b8c2SApple OSS Distributions {
841*19c3b8c2SApple OSS Distributions uint64_t delay = wq_reduce_pool_window.abstime;
842*19c3b8c2SApple OSS Distributions uint16_t idle = wq->wq_thidlecount;
843*19c3b8c2SApple OSS Distributions
844*19c3b8c2SApple OSS Distributions /*
845*19c3b8c2SApple OSS Distributions * If we have less than wq_death_max_load threads, have a 5s timer.
846*19c3b8c2SApple OSS Distributions *
847*19c3b8c2SApple OSS Distributions * For the next wq_max_constrained_threads ones, decay linearly from
848*19c3b8c2SApple OSS Distributions * from 5s to 50ms.
849*19c3b8c2SApple OSS Distributions */
850*19c3b8c2SApple OSS Distributions if (idle <= wq_death_max_load) {
851*19c3b8c2SApple OSS Distributions return delay;
852*19c3b8c2SApple OSS Distributions }
853*19c3b8c2SApple OSS Distributions
854*19c3b8c2SApple OSS Distributions if (wq_max_constrained_threads > idle - wq_death_max_load) {
855*19c3b8c2SApple OSS Distributions delay *= (wq_max_constrained_threads - (idle - wq_death_max_load));
856*19c3b8c2SApple OSS Distributions }
857*19c3b8c2SApple OSS Distributions return delay / wq_max_constrained_threads;
858*19c3b8c2SApple OSS Distributions }
859*19c3b8c2SApple OSS Distributions
860*19c3b8c2SApple OSS Distributions static inline bool
workq_should_kill_idle_thread(struct workqueue * wq,struct uthread * uth,uint64_t now)861*19c3b8c2SApple OSS Distributions workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth,
862*19c3b8c2SApple OSS Distributions uint64_t now)
863*19c3b8c2SApple OSS Distributions {
864*19c3b8c2SApple OSS Distributions uint64_t delay = workq_kill_delay_for_idle_thread(wq);
865*19c3b8c2SApple OSS Distributions return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay;
866*19c3b8c2SApple OSS Distributions }
867*19c3b8c2SApple OSS Distributions
868*19c3b8c2SApple OSS Distributions static void
workq_death_call_schedule(struct workqueue * wq,uint64_t deadline)869*19c3b8c2SApple OSS Distributions workq_death_call_schedule(struct workqueue *wq, uint64_t deadline)
870*19c3b8c2SApple OSS Distributions {
871*19c3b8c2SApple OSS Distributions uint32_t wq_flags = os_atomic_load(&wq->wq_flags, relaxed);
872*19c3b8c2SApple OSS Distributions
873*19c3b8c2SApple OSS Distributions if (wq_flags & (WQ_EXITING | WQ_DEATH_CALL_SCHEDULED)) {
874*19c3b8c2SApple OSS Distributions return;
875*19c3b8c2SApple OSS Distributions }
876*19c3b8c2SApple OSS Distributions os_atomic_or(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
877*19c3b8c2SApple OSS Distributions
878*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_NONE, wq, 1, 0, 0);
879*19c3b8c2SApple OSS Distributions
880*19c3b8c2SApple OSS Distributions /*
881*19c3b8c2SApple OSS Distributions * <rdar://problem/13139182> Due to how long term timers work, the leeway
882*19c3b8c2SApple OSS Distributions * can't be too short, so use 500ms which is long enough that we will not
883*19c3b8c2SApple OSS Distributions * wake up the CPU for killing threads, but short enough that it doesn't
884*19c3b8c2SApple OSS Distributions * fall into long-term timer list shenanigans.
885*19c3b8c2SApple OSS Distributions */
886*19c3b8c2SApple OSS Distributions thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline,
887*19c3b8c2SApple OSS Distributions wq_reduce_pool_window.abstime / 10,
888*19c3b8c2SApple OSS Distributions THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
889*19c3b8c2SApple OSS Distributions }
890*19c3b8c2SApple OSS Distributions
891*19c3b8c2SApple OSS Distributions /*
892*19c3b8c2SApple OSS Distributions * `decrement` is set to the number of threads that are no longer dying:
893*19c3b8c2SApple OSS Distributions * - because they have been resuscitated just in time (workq_pop_idle_thread)
894*19c3b8c2SApple OSS Distributions * - or have been killed (workq_thread_terminate).
895*19c3b8c2SApple OSS Distributions */
896*19c3b8c2SApple OSS Distributions static void
workq_death_policy_evaluate(struct workqueue * wq,uint16_t decrement)897*19c3b8c2SApple OSS Distributions workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement)
898*19c3b8c2SApple OSS Distributions {
899*19c3b8c2SApple OSS Distributions struct uthread *uth;
900*19c3b8c2SApple OSS Distributions
901*19c3b8c2SApple OSS Distributions assert(wq->wq_thdying_count >= decrement);
902*19c3b8c2SApple OSS Distributions if ((wq->wq_thdying_count -= decrement) > 0) {
903*19c3b8c2SApple OSS Distributions return;
904*19c3b8c2SApple OSS Distributions }
905*19c3b8c2SApple OSS Distributions
906*19c3b8c2SApple OSS Distributions if (wq->wq_thidlecount <= 1) {
907*19c3b8c2SApple OSS Distributions return;
908*19c3b8c2SApple OSS Distributions }
909*19c3b8c2SApple OSS Distributions
910*19c3b8c2SApple OSS Distributions if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) {
911*19c3b8c2SApple OSS Distributions return;
912*19c3b8c2SApple OSS Distributions }
913*19c3b8c2SApple OSS Distributions
914*19c3b8c2SApple OSS Distributions uint64_t now = mach_absolute_time();
915*19c3b8c2SApple OSS Distributions uint64_t delay = workq_kill_delay_for_idle_thread(wq);
916*19c3b8c2SApple OSS Distributions
917*19c3b8c2SApple OSS Distributions if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) {
918*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
919*19c3b8c2SApple OSS Distributions wq, wq->wq_thidlecount, 0, 0);
920*19c3b8c2SApple OSS Distributions wq->wq_thdying_count++;
921*19c3b8c2SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_DYING;
922*19c3b8c2SApple OSS Distributions if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
923*19c3b8c2SApple OSS Distributions workq_thread_wakeup(uth);
924*19c3b8c2SApple OSS Distributions }
925*19c3b8c2SApple OSS Distributions return;
926*19c3b8c2SApple OSS Distributions }
927*19c3b8c2SApple OSS Distributions
928*19c3b8c2SApple OSS Distributions workq_death_call_schedule(wq,
929*19c3b8c2SApple OSS Distributions uth->uu_save.uus_workq_park_data.idle_stamp + delay);
930*19c3b8c2SApple OSS Distributions }
931*19c3b8c2SApple OSS Distributions
932*19c3b8c2SApple OSS Distributions void
workq_thread_terminate(struct proc * p,struct uthread * uth)933*19c3b8c2SApple OSS Distributions workq_thread_terminate(struct proc *p, struct uthread *uth)
934*19c3b8c2SApple OSS Distributions {
935*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
936*19c3b8c2SApple OSS Distributions
937*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
938*19c3b8c2SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
939*19c3b8c2SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
940*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END,
941*19c3b8c2SApple OSS Distributions wq, wq->wq_thidlecount, 0, 0);
942*19c3b8c2SApple OSS Distributions workq_death_policy_evaluate(wq, 1);
943*19c3b8c2SApple OSS Distributions }
944*19c3b8c2SApple OSS Distributions if (wq->wq_nthreads-- == wq_max_threads) {
945*19c3b8c2SApple OSS Distributions /*
946*19c3b8c2SApple OSS Distributions * We got under the thread limit again, which may have prevented
947*19c3b8c2SApple OSS Distributions * thread creation from happening, redrive if there are pending requests
948*19c3b8c2SApple OSS Distributions */
949*19c3b8c2SApple OSS Distributions if (wq->wq_reqcount) {
950*19c3b8c2SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
951*19c3b8c2SApple OSS Distributions }
952*19c3b8c2SApple OSS Distributions }
953*19c3b8c2SApple OSS Distributions workq_unlock(wq);
954*19c3b8c2SApple OSS Distributions
955*19c3b8c2SApple OSS Distributions thread_deallocate(get_machthread(uth));
956*19c3b8c2SApple OSS Distributions }
957*19c3b8c2SApple OSS Distributions
/*
 * Thread-call handler that ages out idle workqueue threads.
 *
 * param0 is the workqueue. Clears WQ_DEATH_CALL_SCHEDULED (so a new death
 * call can be armed) and runs the death policy with no thread terminating.
 */
static void
workq_kill_old_threads_call(void *param0, void *param1 __unused)
{
	struct workqueue *wq = param0;

	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_START, wq, 0, 0, 0);
	/* Un-arm the death call before evaluating, so the policy may re-arm it. */
	os_atomic_andnot(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
	workq_death_policy_evaluate(wq, 0);
	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_END, wq, 0, 0, 0);
	workq_unlock(wq);
}
970*19c3b8c2SApple OSS Distributions
/*
 * Pop a thread off an idle list, move it to the run list, and mark it
 * UT_WORKQ_RUNNING plus the pool flags in `uu_flags`.
 *
 * Prefers wq_thidlelist and falls back to wq_thnewlist; the fallback does
 * not NULL-check TAILQ_FIRST, so this appears to rely on the caller
 * guaranteeing at least one idle thread exists (wq_thidlecount > 0) —
 * NOTE(review): confirm against callers.
 *
 * On return, *needs_wakeup tells the caller whether it must wake the thread:
 * it is set to false for threads that were dying (rescued here) or that
 * still have UT_WORKQ_IDLE_CLEANUP pending.
 *
 * Called with the workq lock held.
 */
static struct uthread *
workq_pop_idle_thread(struct workqueue *wq, uint16_t uu_flags,
    bool *needs_wakeup)
{
	struct uthread *uth;

	if ((uth = TAILQ_FIRST(&wq->wq_thidlelist))) {
		TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
	} else {
		/* No parked veteran threads: take a never-run thread instead. */
		uth = TAILQ_FIRST(&wq->wq_thnewlist);
		TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
	}
	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);

	assert((uth->uu_workq_flags & UT_WORKQ_RUNNING) == 0);
	uth->uu_workq_flags |= UT_WORKQ_RUNNING | uu_flags;

	/* A thread is never woken up as part of the cooperative pool */
	assert((uu_flags & UT_WORKQ_COOPERATIVE) == 0);

	if ((uu_flags & UT_WORKQ_OVERCOMMIT) == 0) {
		wq->wq_constrained_threads_scheduled++;
	}
	wq->wq_threads_scheduled++;
	wq->wq_thidlecount--;

	if (__improbable(uth->uu_workq_flags & UT_WORKQ_DYING)) {
		/* Rescue a thread that was slated to die: clear UT_WORKQ_DYING
		 * (XOR is safe; the flag is known set) and let the death policy
		 * account for the un-death. No wakeup needed here. */
		uth->uu_workq_flags ^= UT_WORKQ_DYING;
		workq_death_policy_evaluate(wq, 1);
		*needs_wakeup = false;
	} else if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
		/* Idle cleanup is still pending — presumably the thread is not
		 * fully parked yet, so no wakeup is issued; verify with the
		 * park path. */
		*needs_wakeup = false;
	} else {
		*needs_wakeup = true;
	}
	return uth;
}
1008*19c3b8c2SApple OSS Distributions
/*
 * Called by thread_create_workq_waiting() during thread initialization, before
 * assert_wait, before the thread has been started.
 *
 * Initializes the uthread's workqueue state (UT_WORKQ_NEW, legacy-QoS
 * priority, no thread port, no stack), tags the Mach thread as a pthread
 * workqueue thread, and takes the workq lock — which is left HELD on
 * return. Returns the event the new thread should park on.
 */
event_t
workq_thread_init_and_wq_lock(task_t task, thread_t th)
{
	struct uthread *uth = get_bsdthread_info(th);

	uth->uu_workq_flags = UT_WORKQ_NEW;
	uth->uu_workq_pri = WORKQ_POLICY_INIT(THREAD_QOS_LEGACY);
	uth->uu_workq_thport = MACH_PORT_NULL;
	uth->uu_workq_stackaddr = 0;
	uth->uu_workq_pthread_kill_allowed = 0;

	thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
	thread_reset_workq_qos(th, THREAD_QOS_LEGACY);

	/* Lock intentionally not dropped: caller unlocks after assert_wait. */
	workq_lock_spin(proc_get_wqptr_fast(get_bsdtask_info(task)));
	return workq_parked_wait_event(uth);
}
1030*19c3b8c2SApple OSS Distributions
/**
 * Try to add a new workqueue thread.
 *
 * - called with workq lock held
 * - dropped and retaken around thread creation
 * - return with workq lock held
 *
 * Reserves a slot in wq_nthreads up front (while still locked), then
 * allocates a user stack and creates the kernel thread with the lock
 * dropped. On success the thread is parked on wq_thnewlist. On failure the
 * reservation is rolled back and false is returned; redriving thread
 * creation is explicitly left to the caller.
 */
static bool
workq_add_new_idle_thread(proc_t p, struct workqueue *wq)
{
	mach_vm_offset_t th_stackaddr;
	kern_return_t kret;
	thread_t th;

	/* Reserve the slot before unlocking so concurrent creators see it. */
	wq->wq_nthreads++;

	workq_unlock(wq);

	vm_map_t vmap = get_task_map(proc_task(p));

	kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 1, 0);
		goto out;
	}

	kret = thread_create_workq_waiting(proc_task(p), workq_unpark_continue, &th);
	if (kret != KERN_SUCCESS) {
		WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
		    kret, 0, 0);
		/* Unwind the stack allocation; the nthreads slot unwinds at out:. */
		pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr);
		goto out;
	}

	// thread_create_workq_waiting() will return with the wq lock held
	// on success, because it calls workq_thread_init_and_wq_lock() above

	struct uthread *uth = get_bsdthread_info(th);

	wq->wq_creations++;
	wq->wq_thidlecount++;
	uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;
	TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);

	WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0);
	return true;

out:
	workq_lock_spin(wq);
	/*
	 * Do not redrive here if we went under wq_max_threads again,
	 * it is the responsibility of the callers of this function
	 * to do so when it fails.
	 */
	wq->wq_nthreads--;
	return false;
}
1089*19c3b8c2SApple OSS Distributions
1090*19c3b8c2SApple OSS Distributions static inline bool
workq_thread_is_overcommit(struct uthread * uth)1091*19c3b8c2SApple OSS Distributions workq_thread_is_overcommit(struct uthread *uth)
1092*19c3b8c2SApple OSS Distributions {
1093*19c3b8c2SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0;
1094*19c3b8c2SApple OSS Distributions }
1095*19c3b8c2SApple OSS Distributions
1096*19c3b8c2SApple OSS Distributions static inline bool
workq_thread_is_nonovercommit(struct uthread * uth)1097*19c3b8c2SApple OSS Distributions workq_thread_is_nonovercommit(struct uthread *uth)
1098*19c3b8c2SApple OSS Distributions {
1099*19c3b8c2SApple OSS Distributions return (uth->uu_workq_flags & (UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE)) == 0;
1100*19c3b8c2SApple OSS Distributions }
1101*19c3b8c2SApple OSS Distributions
1102*19c3b8c2SApple OSS Distributions static inline bool
workq_thread_is_cooperative(struct uthread * uth)1103*19c3b8c2SApple OSS Distributions workq_thread_is_cooperative(struct uthread *uth)
1104*19c3b8c2SApple OSS Distributions {
1105*19c3b8c2SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_COOPERATIVE) != 0;
1106*19c3b8c2SApple OSS Distributions }
1107*19c3b8c2SApple OSS Distributions
1108*19c3b8c2SApple OSS Distributions static inline void
workq_thread_set_type(struct uthread * uth,uint16_t flags)1109*19c3b8c2SApple OSS Distributions workq_thread_set_type(struct uthread *uth, uint16_t flags)
1110*19c3b8c2SApple OSS Distributions {
1111*19c3b8c2SApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
1112*19c3b8c2SApple OSS Distributions uth->uu_workq_flags |= flags;
1113*19c3b8c2SApple OSS Distributions }
1114*19c3b8c2SApple OSS Distributions
1115*19c3b8c2SApple OSS Distributions
#define WORKQ_UNPARK_FOR_DEATH_WAS_IDLE 0x1

/*
 * Send a parked workqueue thread back to userspace one last time so pthread
 * can run its cleanup and exit the thread (WQ_SETUP_EXIT_THREAD).
 *
 * Called with the workq lock held; the lock is dropped before the upcall.
 * Never returns.
 *
 * death_flags: WORKQ_UNPARK_FOR_DEATH_WAS_IDLE if the thread is coming off
 * an idle list (so idle accounting must be unwound here).
 * setup_flags: WQ_SETUP_CLEAR_VOUCHER is honored; the rest are not consulted.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t death_flags, uint32_t setup_flags)
{
	thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
	bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW;

	/* Cap the exiting thread's priority at the cleanup QoS. */
	if (qos > WORKQ_THREAD_QOS_CLEANUP) {
		workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
		qos = WORKQ_THREAD_QOS_CLEANUP;
	}

	workq_thread_reset_cpupercent(NULL, uth);

	if (death_flags & WORKQ_UNPARK_FOR_DEATH_WAS_IDLE) {
		wq->wq_thidlecount--;
		/* UT_WORKQ_NEW threads park on wq_thnewlist, others on
		 * wq_thidlelist — remove from the matching list. */
		if (first_use) {
			TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
		} else {
			TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
		}
	}
	TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);

	workq_unlock(wq);

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t flags = WQ_FLAG_THREAD_NEWSPI | qos | WQ_FLAG_THREAD_PRIO_QOS;
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (!first_use) {
		flags |= WQ_FLAG_THREAD_REUSE;
	}

	/* Final upcall: WQ_SETUP_EXIT_THREAD tells pthread this thread exits. */
	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, flags);
	__builtin_unreachable();
}
1163*19c3b8c2SApple OSS Distributions
1164*19c3b8c2SApple OSS Distributions bool
workq_is_current_thread_updating_turnstile(struct workqueue * wq)1165*19c3b8c2SApple OSS Distributions workq_is_current_thread_updating_turnstile(struct workqueue *wq)
1166*19c3b8c2SApple OSS Distributions {
1167*19c3b8c2SApple OSS Distributions return wq->wq_turnstile_updater == current_thread();
1168*19c3b8c2SApple OSS Distributions }
1169*19c3b8c2SApple OSS Distributions
/*
 * Run `operation` while recording the current thread in
 * wq_turnstile_updater, so that workq_is_current_thread_updating_turnstile()
 * returns true for this thread for the duration of the block.
 *
 * Called with the workq lock held (asserted).
 */
__attribute__((always_inline))
static inline void
workq_perform_turnstile_operation_locked(struct workqueue *wq,
    void (^operation)(void))
{
	workq_lock_held(wq);
	wq->wq_turnstile_updater = current_thread();
	operation();
	wq->wq_turnstile_updater = THREAD_NULL;
}
1180*19c3b8c2SApple OSS Distributions
/*
 * Point the workqueue's turnstile at a new inheritor.
 *
 * wq_inheritor caches the last value pushed to the turnstile so redundant
 * updates can be skipped without any turnstile traffic. The actual update
 * runs under workq_perform_turnstile_operation_locked() and completes
 * immediately (TURNSTILE_IMMEDIATE_UPDATE with the interlock held).
 *
 * Called with the workq lock held.
 */
static void
workq_turnstile_update_inheritor(struct workqueue *wq,
    turnstile_inheritor_t inheritor,
    turnstile_update_flags_t flags)
{
	if (wq->wq_inheritor == inheritor) {
		/* Already pointing at this inheritor: nothing to do. */
		return;
	}
	wq->wq_inheritor = inheritor;
	workq_perform_turnstile_operation_locked(wq, ^{
		turnstile_update_inheritor(wq->wq_turnstile, inheritor,
		    flags | TURNSTILE_IMMEDIATE_UPDATE);
		turnstile_update_inheritor_complete(wq->wq_turnstile,
		    TURNSTILE_INTERLOCK_HELD);
	});
}
1197*19c3b8c2SApple OSS Distributions
/*
 * Park a thread that has run out of work: move it from the run list onto an
 * idle list (or straight onto the death path if there are too many idle
 * threads), unwinding its scheduled-thread accounting on the way.
 *
 * Called with the workq lock held. May not return (the death path calls
 * workq_unpark_for_death_and_unlock(), which is noreturn).
 */
static void
workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth,
    uint32_t setup_flags)
{
	uint64_t now = mach_absolute_time();
	bool is_creator = (uth == wq->wq_creator);

	if (workq_thread_is_cooperative(uth)) {
		assert(!is_creator);

		thread_qos_t thread_qos = uth->uu_workq_pri.qos_req;
		_wq_cooperative_queue_scheduled_count_dec(wq, thread_qos);

		/* Before we get here, we always go through
		 * workq_select_threadreq_or_park_and_unlock. If we got here, it means
		 * that we went through the logic in workq_threadreq_select which
		 * did the refresh for the next best cooperative qos while
		 * excluding the current thread - we shouldn't need to do it again.
		 */
		assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
	} else if (workq_thread_is_nonovercommit(uth)) {
		assert(!is_creator);

		wq->wq_constrained_threads_scheduled--;
	}

	/* The thread is no longer running nor bound to any pool. */
	uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
	TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
	wq->wq_threads_scheduled--;

	if (is_creator) {
		wq->wq_creator = NULL;
		WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
		    uth->uu_save.uus_workq_park_data.yields);
	}

	if (wq->wq_inheritor == get_machthread(uth)) {
		/* This thread was the turnstile inheritor: hand priority back to
		 * the workqueue if requests are pending, else clear it. */
		assert(wq->wq_creator == NULL);
		if (wq->wq_reqcount) {
			workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
		} else {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
	}

	if (uth->uu_workq_flags & UT_WORKQ_NEW) {
		/* A never-run thread parks on the new list and skips the idle
		 * accounting / death-policy logic below. */
		assert(is_creator || (_wq_flags(wq) & WQ_EXITING));
		TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
		wq->wq_thidlecount++;
		return;
	}

	if (!is_creator) {
		_wq_thactive_dec(wq, uth->uu_workq_pri.qos_bucket);
		wq->wq_thscheduled_count[_wq_bucket(uth->uu_workq_pri.qos_bucket)]--;
		uth->uu_workq_flags |= UT_WORKQ_IDLE_CLEANUP;
	}

	uth->uu_save.uus_workq_park_data.idle_stamp = now;

	struct uthread *oldest = workq_oldest_killable_idle_thread(wq);
	uint16_t cur_idle = wq->wq_thidlecount;

	if (cur_idle >= wq_max_constrained_threads ||
	    (wq->wq_thdying_count == 0 && oldest &&
	    workq_should_kill_idle_thread(wq, oldest, now))) {
		/*
		 * Immediately kill threads if we have too many of them.
		 *
		 * And swap "place" with the oldest one we'd have woken up.
		 * This is a relatively desperate situation where we really
		 * need to kill threads quickly and it's best to kill
		 * the one that's currently on core than context switching.
		 */
		if (oldest) {
			oldest->uu_save.uus_workq_park_data.idle_stamp = now;
			TAILQ_REMOVE(&wq->wq_thidlelist, oldest, uu_workq_entry);
			TAILQ_INSERT_HEAD(&wq->wq_thidlelist, oldest, uu_workq_entry);
		}

		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, cur_idle, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
		/* noreturn: the thread exits to userspace to die. */
		workq_unpark_for_death_and_unlock(p, wq, uth, 0, setup_flags);
		__builtin_unreachable();
	}

	struct uthread *tail = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);

	cur_idle += 1;
	wq->wq_thidlecount = cur_idle;

	/* Under heavy idle load, park at the tail without keeping the stack;
	 * otherwise park at the head keeping the stack for fast reuse. */
	if (cur_idle >= wq_death_max_load && tail &&
	    tail->uu_save.uus_workq_park_data.has_stack) {
		uth->uu_save.uus_workq_park_data.has_stack = false;
		TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry);
	} else {
		uth->uu_save.uus_workq_park_data.has_stack = true;
		TAILQ_INSERT_HEAD(&wq->wq_thidlelist, uth, uu_workq_entry);
	}

	if (!tail) {
		/* First thread on the idle list: arm the death call. */
		uint64_t delay = workq_kill_delay_for_idle_thread(wq);
		workq_death_call_schedule(wq, now + delay);
	}
}
1306*19c3b8c2SApple OSS Distributions
1307*19c3b8c2SApple OSS Distributions #pragma mark thread requests
1308*19c3b8c2SApple OSS Distributions
1309*19c3b8c2SApple OSS Distributions static inline bool
workq_tr_is_overcommit(workq_tr_flags_t tr_flags)1310*19c3b8c2SApple OSS Distributions workq_tr_is_overcommit(workq_tr_flags_t tr_flags)
1311*19c3b8c2SApple OSS Distributions {
1312*19c3b8c2SApple OSS Distributions return (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) != 0;
1313*19c3b8c2SApple OSS Distributions }
1314*19c3b8c2SApple OSS Distributions
1315*19c3b8c2SApple OSS Distributions static inline bool
workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)1316*19c3b8c2SApple OSS Distributions workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)
1317*19c3b8c2SApple OSS Distributions {
1318*19c3b8c2SApple OSS Distributions return (tr_flags & (WORKQ_TR_FLAG_OVERCOMMIT | WORKQ_TR_FLAG_COOPERATIVE)) == 0;
1319*19c3b8c2SApple OSS Distributions }
1320*19c3b8c2SApple OSS Distributions
1321*19c3b8c2SApple OSS Distributions static inline bool
workq_tr_is_cooperative(workq_tr_flags_t tr_flags)1322*19c3b8c2SApple OSS Distributions workq_tr_is_cooperative(workq_tr_flags_t tr_flags)
1323*19c3b8c2SApple OSS Distributions {
1324*19c3b8c2SApple OSS Distributions return (tr_flags & WORKQ_TR_FLAG_COOPERATIVE) != 0;
1325*19c3b8c2SApple OSS Distributions }
1326*19c3b8c2SApple OSS Distributions
/* Convenience wrappers applying the workq_tr_is_* predicates to a request. */
#define workq_threadreq_is_overcommit(req) workq_tr_is_overcommit((req)->tr_flags)
#define workq_threadreq_is_nonovercommit(req) workq_tr_is_nonovercommit((req)->tr_flags)
#define workq_threadreq_is_cooperative(req) workq_tr_is_cooperative((req)->tr_flags)
1330*19c3b8c2SApple OSS Distributions
1331*19c3b8c2SApple OSS Distributions static inline int
workq_priority_for_req(workq_threadreq_t req)1332*19c3b8c2SApple OSS Distributions workq_priority_for_req(workq_threadreq_t req)
1333*19c3b8c2SApple OSS Distributions {
1334*19c3b8c2SApple OSS Distributions thread_qos_t qos = req->tr_qos;
1335*19c3b8c2SApple OSS Distributions
1336*19c3b8c2SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1337*19c3b8c2SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
1338*19c3b8c2SApple OSS Distributions assert(trp.trp_flags & TRP_PRIORITY);
1339*19c3b8c2SApple OSS Distributions return trp.trp_pri;
1340*19c3b8c2SApple OSS Distributions }
1341*19c3b8c2SApple OSS Distributions return thread_workq_pri_for_qos(qos);
1342*19c3b8c2SApple OSS Distributions }
1343*19c3b8c2SApple OSS Distributions
1344*19c3b8c2SApple OSS Distributions static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue * wq,workq_threadreq_t req)1345*19c3b8c2SApple OSS Distributions workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
1346*19c3b8c2SApple OSS Distributions {
1347*19c3b8c2SApple OSS Distributions assert(!workq_tr_is_cooperative(req->tr_flags));
1348*19c3b8c2SApple OSS Distributions
1349*19c3b8c2SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1350*19c3b8c2SApple OSS Distributions return &wq->wq_special_queue;
1351*19c3b8c2SApple OSS Distributions } else if (workq_tr_is_overcommit(req->tr_flags)) {
1352*19c3b8c2SApple OSS Distributions return &wq->wq_overcommit_queue;
1353*19c3b8c2SApple OSS Distributions } else {
1354*19c3b8c2SApple OSS Distributions return &wq->wq_constrained_queue;
1355*19c3b8c2SApple OSS Distributions }
1356*19c3b8c2SApple OSS Distributions }
1357*19c3b8c2SApple OSS Distributions
1358*19c3b8c2SApple OSS Distributions
1359*19c3b8c2SApple OSS Distributions /* Calculates the number of threads scheduled >= the input QoS */
1360*19c3b8c2SApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_to_qos(struct workqueue * wq,thread_qos_t qos)1361*19c3b8c2SApple OSS Distributions workq_num_cooperative_threads_scheduled_to_qos(struct workqueue *wq, thread_qos_t qos)
1362*19c3b8c2SApple OSS Distributions {
1363*19c3b8c2SApple OSS Distributions workq_lock_held(wq);
1364*19c3b8c2SApple OSS Distributions
1365*19c3b8c2SApple OSS Distributions uint64_t num_cooperative_threads = 0;
1366*19c3b8c2SApple OSS Distributions
1367*19c3b8c2SApple OSS Distributions for (thread_qos_t cur_qos = WORKQ_THREAD_QOS_MAX; cur_qos >= qos; cur_qos--) {
1368*19c3b8c2SApple OSS Distributions uint8_t bucket = _wq_bucket(cur_qos);
1369*19c3b8c2SApple OSS Distributions num_cooperative_threads += wq->wq_cooperative_queue_scheduled_count[bucket];
1370*19c3b8c2SApple OSS Distributions }
1371*19c3b8c2SApple OSS Distributions
1372*19c3b8c2SApple OSS Distributions return num_cooperative_threads;
1373*19c3b8c2SApple OSS Distributions }
1374*19c3b8c2SApple OSS Distributions
1375*19c3b8c2SApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_total(struct workqueue * wq)1376*19c3b8c2SApple OSS Distributions workq_num_cooperative_threads_scheduled_total(struct workqueue *wq)
1377*19c3b8c2SApple OSS Distributions {
1378*19c3b8c2SApple OSS Distributions return workq_num_cooperative_threads_scheduled_to_qos(wq, WORKQ_THREAD_QOS_MIN);
1379*19c3b8c2SApple OSS Distributions }
1380*19c3b8c2SApple OSS Distributions
1381*19c3b8c2SApple OSS Distributions #if DEBUG || DEVELOPMENT
1382*19c3b8c2SApple OSS Distributions static bool
workq_has_cooperative_thread_requests(struct workqueue * wq)1383*19c3b8c2SApple OSS Distributions workq_has_cooperative_thread_requests(struct workqueue *wq)
1384*19c3b8c2SApple OSS Distributions {
1385*19c3b8c2SApple OSS Distributions for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
1386*19c3b8c2SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
1387*19c3b8c2SApple OSS Distributions if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1388*19c3b8c2SApple OSS Distributions return true;
1389*19c3b8c2SApple OSS Distributions }
1390*19c3b8c2SApple OSS Distributions }
1391*19c3b8c2SApple OSS Distributions
1392*19c3b8c2SApple OSS Distributions return false;
1393*19c3b8c2SApple OSS Distributions }
1394*19c3b8c2SApple OSS Distributions #endif
1395*19c3b8c2SApple OSS Distributions
1396*19c3b8c2SApple OSS Distributions /*
1397*19c3b8c2SApple OSS Distributions * Determines the next QoS bucket we should service next in the cooperative
1398*19c3b8c2SApple OSS Distributions * pool. This function will always return a QoS for cooperative pool as long as
1399*19c3b8c2SApple OSS Distributions * there are requests to be serviced.
1400*19c3b8c2SApple OSS Distributions *
1401*19c3b8c2SApple OSS Distributions * Unlike the other thread pools, for the cooperative thread pool the schedule
1402*19c3b8c2SApple OSS Distributions * counts for the various buckets in the pool affect the next best request for
1403*19c3b8c2SApple OSS Distributions * it.
1404*19c3b8c2SApple OSS Distributions *
1405*19c3b8c2SApple OSS Distributions * This function is called in the following contexts:
1406*19c3b8c2SApple OSS Distributions *
1407*19c3b8c2SApple OSS Distributions * a) When determining the best thread QoS for cooperative bucket for the
1408*19c3b8c2SApple OSS Distributions * creator/thread reuse
1409*19c3b8c2SApple OSS Distributions *
1410*19c3b8c2SApple OSS Distributions * b) Once (a) has happened and thread has bound to a thread request, figuring
1411*19c3b8c2SApple OSS Distributions * out whether the next best request for this pool has changed so that creator
1412*19c3b8c2SApple OSS Distributions * can be scheduled.
1413*19c3b8c2SApple OSS Distributions *
1414*19c3b8c2SApple OSS Distributions * Returns true if the cooperative queue's best qos changed from previous
1415*19c3b8c2SApple OSS Distributions * value.
1416*19c3b8c2SApple OSS Distributions */
1417*19c3b8c2SApple OSS Distributions static bool
_wq_cooperative_queue_refresh_best_req_qos(struct workqueue * wq)1418*19c3b8c2SApple OSS Distributions _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq)
1419*19c3b8c2SApple OSS Distributions {
1420*19c3b8c2SApple OSS Distributions workq_lock_held(wq);
1421*19c3b8c2SApple OSS Distributions
1422*19c3b8c2SApple OSS Distributions thread_qos_t old_best_req_qos = wq->wq_cooperative_queue_best_req_qos;
1423*19c3b8c2SApple OSS Distributions
1424*19c3b8c2SApple OSS Distributions /* We determine the next best cooperative thread request based on the
1425*19c3b8c2SApple OSS Distributions * following:
1426*19c3b8c2SApple OSS Distributions *
1427*19c3b8c2SApple OSS Distributions * 1. Take the MAX of the following:
1428*19c3b8c2SApple OSS Distributions * a) Highest qos with pending TRs such that number of scheduled
1429*19c3b8c2SApple OSS Distributions * threads so far with >= qos is < wq_max_cooperative_threads
1430*19c3b8c2SApple OSS Distributions * b) Highest qos bucket with pending TRs but no scheduled threads for that bucket
1431*19c3b8c2SApple OSS Distributions *
1432*19c3b8c2SApple OSS Distributions * 2. If the result of (1) is UN, then we pick the highest priority amongst
1433*19c3b8c2SApple OSS Distributions * pending thread requests in the pool.
1434*19c3b8c2SApple OSS Distributions *
1435*19c3b8c2SApple OSS Distributions */
1436*19c3b8c2SApple OSS Distributions thread_qos_t highest_qos_with_no_scheduled = THREAD_QOS_UNSPECIFIED;
1437*19c3b8c2SApple OSS Distributions thread_qos_t highest_qos_req_with_width = THREAD_QOS_UNSPECIFIED;
1438*19c3b8c2SApple OSS Distributions
1439*19c3b8c2SApple OSS Distributions thread_qos_t highest_qos_req = THREAD_QOS_UNSPECIFIED;
1440*19c3b8c2SApple OSS Distributions
1441*19c3b8c2SApple OSS Distributions int scheduled_count_till_qos = 0;
1442*19c3b8c2SApple OSS Distributions
1443*19c3b8c2SApple OSS Distributions for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
1444*19c3b8c2SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
1445*19c3b8c2SApple OSS Distributions uint8_t scheduled_count_for_bucket = wq->wq_cooperative_queue_scheduled_count[bucket];
1446*19c3b8c2SApple OSS Distributions scheduled_count_till_qos += scheduled_count_for_bucket;
1447*19c3b8c2SApple OSS Distributions
1448*19c3b8c2SApple OSS Distributions if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1449*19c3b8c2SApple OSS Distributions if (qos > highest_qos_req) {
1450*19c3b8c2SApple OSS Distributions highest_qos_req = qos;
1451*19c3b8c2SApple OSS Distributions }
1452*19c3b8c2SApple OSS Distributions /*
1453*19c3b8c2SApple OSS Distributions * The pool isn't saturated for threads at and above this QoS, and
1454*19c3b8c2SApple OSS Distributions * this qos bucket has pending requests
1455*19c3b8c2SApple OSS Distributions */
1456*19c3b8c2SApple OSS Distributions if (scheduled_count_till_qos < wq_cooperative_queue_max_size(wq)) {
1457*19c3b8c2SApple OSS Distributions if (qos > highest_qos_req_with_width) {
1458*19c3b8c2SApple OSS Distributions highest_qos_req_with_width = qos;
1459*19c3b8c2SApple OSS Distributions }
1460*19c3b8c2SApple OSS Distributions }
1461*19c3b8c2SApple OSS Distributions
1462*19c3b8c2SApple OSS Distributions /*
1463*19c3b8c2SApple OSS Distributions * There are no threads scheduled for this bucket but there
1464*19c3b8c2SApple OSS Distributions * is work pending, give it at least 1 thread
1465*19c3b8c2SApple OSS Distributions */
1466*19c3b8c2SApple OSS Distributions if (scheduled_count_for_bucket == 0) {
1467*19c3b8c2SApple OSS Distributions if (qos > highest_qos_with_no_scheduled) {
1468*19c3b8c2SApple OSS Distributions highest_qos_with_no_scheduled = qos;
1469*19c3b8c2SApple OSS Distributions }
1470*19c3b8c2SApple OSS Distributions }
1471*19c3b8c2SApple OSS Distributions }
1472*19c3b8c2SApple OSS Distributions }
1473*19c3b8c2SApple OSS Distributions
1474*19c3b8c2SApple OSS Distributions wq->wq_cooperative_queue_best_req_qos = MAX(highest_qos_with_no_scheduled, highest_qos_req_with_width);
1475*19c3b8c2SApple OSS Distributions if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
1476*19c3b8c2SApple OSS Distributions wq->wq_cooperative_queue_best_req_qos = highest_qos_req;
1477*19c3b8c2SApple OSS Distributions }
1478*19c3b8c2SApple OSS Distributions
1479*19c3b8c2SApple OSS Distributions #if DEBUG || DEVELOPMENT
1480*19c3b8c2SApple OSS Distributions /* Assert that if we are showing up the next best req as UN, then there
1481*19c3b8c2SApple OSS Distributions * actually is no thread request in the cooperative pool buckets */
1482*19c3b8c2SApple OSS Distributions if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
1483*19c3b8c2SApple OSS Distributions assert(!workq_has_cooperative_thread_requests(wq));
1484*19c3b8c2SApple OSS Distributions }
1485*19c3b8c2SApple OSS Distributions #endif
1486*19c3b8c2SApple OSS Distributions
1487*19c3b8c2SApple OSS Distributions return old_best_req_qos != wq->wq_cooperative_queue_best_req_qos;
1488*19c3b8c2SApple OSS Distributions }
1489*19c3b8c2SApple OSS Distributions
1490*19c3b8c2SApple OSS Distributions /*
1491*19c3b8c2SApple OSS Distributions * Returns whether or not the input thread (or creator thread if uth is NULL)
1492*19c3b8c2SApple OSS Distributions * should be allowed to work as part of the cooperative pool for the <input qos>
1493*19c3b8c2SApple OSS Distributions * bucket.
1494*19c3b8c2SApple OSS Distributions *
1495*19c3b8c2SApple OSS Distributions * This function is called in a bunch of places:
1496*19c3b8c2SApple OSS Distributions * a) Quantum expires for a thread and it is part of the cooperative pool
1497*19c3b8c2SApple OSS Distributions * b) When trying to pick a thread request for the creator thread to
1498*19c3b8c2SApple OSS Distributions * represent.
1499*19c3b8c2SApple OSS Distributions * c) When a thread is trying to pick a thread request to actually bind to
1500*19c3b8c2SApple OSS Distributions * and service.
1501*19c3b8c2SApple OSS Distributions *
1502*19c3b8c2SApple OSS Distributions * Called with workq lock held.
1503*19c3b8c2SApple OSS Distributions */
1504*19c3b8c2SApple OSS Distributions
1505*19c3b8c2SApple OSS Distributions #define WQ_COOPERATIVE_POOL_UNSATURATED 1
1506*19c3b8c2SApple OSS Distributions #define WQ_COOPERATIVE_BUCKET_UNSERVICED 2
1507*19c3b8c2SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS 3
1508*19c3b8c2SApple OSS Distributions
1509*19c3b8c2SApple OSS Distributions static bool
workq_cooperative_allowance(struct workqueue * wq,thread_qos_t qos,struct uthread * uth,bool may_start_timer)1510*19c3b8c2SApple OSS Distributions workq_cooperative_allowance(struct workqueue *wq, thread_qos_t qos, struct uthread *uth,
1511*19c3b8c2SApple OSS Distributions bool may_start_timer)
1512*19c3b8c2SApple OSS Distributions {
1513*19c3b8c2SApple OSS Distributions workq_lock_held(wq);
1514*19c3b8c2SApple OSS Distributions
1515*19c3b8c2SApple OSS Distributions bool exclude_thread_as_scheduled = false;
1516*19c3b8c2SApple OSS Distributions bool passed_admissions = false;
1517*19c3b8c2SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
1518*19c3b8c2SApple OSS Distributions
1519*19c3b8c2SApple OSS Distributions if (uth && workq_thread_is_cooperative(uth)) {
1520*19c3b8c2SApple OSS Distributions exclude_thread_as_scheduled = true;
1521*19c3b8c2SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
1522*19c3b8c2SApple OSS Distributions }
1523*19c3b8c2SApple OSS Distributions
1524*19c3b8c2SApple OSS Distributions /*
1525*19c3b8c2SApple OSS Distributions * We have not saturated the pool yet, let this thread continue
1526*19c3b8c2SApple OSS Distributions */
1527*19c3b8c2SApple OSS Distributions uint64_t total_cooperative_threads;
1528*19c3b8c2SApple OSS Distributions total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
1529*19c3b8c2SApple OSS Distributions if (total_cooperative_threads < wq_cooperative_queue_max_size(wq)) {
1530*19c3b8c2SApple OSS Distributions passed_admissions = true;
1531*19c3b8c2SApple OSS Distributions WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
1532*19c3b8c2SApple OSS Distributions total_cooperative_threads, qos, passed_admissions,
1533*19c3b8c2SApple OSS Distributions WQ_COOPERATIVE_POOL_UNSATURATED);
1534*19c3b8c2SApple OSS Distributions goto out;
1535*19c3b8c2SApple OSS Distributions }
1536*19c3b8c2SApple OSS Distributions
1537*19c3b8c2SApple OSS Distributions /*
1538*19c3b8c2SApple OSS Distributions * Without this thread, nothing is servicing the bucket which has pending
1539*19c3b8c2SApple OSS Distributions * work
1540*19c3b8c2SApple OSS Distributions */
1541*19c3b8c2SApple OSS Distributions uint64_t bucket_scheduled = wq->wq_cooperative_queue_scheduled_count[bucket];
1542*19c3b8c2SApple OSS Distributions if (bucket_scheduled == 0 &&
1543*19c3b8c2SApple OSS Distributions !STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1544*19c3b8c2SApple OSS Distributions passed_admissions = true;
1545*19c3b8c2SApple OSS Distributions WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
1546*19c3b8c2SApple OSS Distributions total_cooperative_threads, qos, passed_admissions,
1547*19c3b8c2SApple OSS Distributions WQ_COOPERATIVE_BUCKET_UNSERVICED);
1548*19c3b8c2SApple OSS Distributions goto out;
1549*19c3b8c2SApple OSS Distributions }
1550*19c3b8c2SApple OSS Distributions
1551*19c3b8c2SApple OSS Distributions /*
1552*19c3b8c2SApple OSS Distributions * If number of threads at the QoS bucket >= input QoS exceeds the max we want
1553*19c3b8c2SApple OSS Distributions * for the pool, deny this thread
1554*19c3b8c2SApple OSS Distributions */
1555*19c3b8c2SApple OSS Distributions uint64_t aggregate_down_to_qos = workq_num_cooperative_threads_scheduled_to_qos(wq, qos);
1556*19c3b8c2SApple OSS Distributions passed_admissions = (aggregate_down_to_qos < wq_cooperative_queue_max_size(wq));
1557*19c3b8c2SApple OSS Distributions WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE, aggregate_down_to_qos,
1558*19c3b8c2SApple OSS Distributions qos, passed_admissions, WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS);
1559*19c3b8c2SApple OSS Distributions
1560*19c3b8c2SApple OSS Distributions if (!passed_admissions && may_start_timer) {
1561*19c3b8c2SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
1562*19c3b8c2SApple OSS Distributions }
1563*19c3b8c2SApple OSS Distributions
1564*19c3b8c2SApple OSS Distributions out:
1565*19c3b8c2SApple OSS Distributions if (exclude_thread_as_scheduled) {
1566*19c3b8c2SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
1567*19c3b8c2SApple OSS Distributions }
1568*19c3b8c2SApple OSS Distributions return passed_admissions;
1569*19c3b8c2SApple OSS Distributions }
1570*19c3b8c2SApple OSS Distributions
1571*19c3b8c2SApple OSS Distributions /*
1572*19c3b8c2SApple OSS Distributions * returns true if the best request for the pool changed as a result of
1573*19c3b8c2SApple OSS Distributions * enqueuing this thread request.
1574*19c3b8c2SApple OSS Distributions */
1575*19c3b8c2SApple OSS Distributions static bool
workq_threadreq_enqueue(struct workqueue * wq,workq_threadreq_t req)1576*19c3b8c2SApple OSS Distributions workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req)
1577*19c3b8c2SApple OSS Distributions {
1578*19c3b8c2SApple OSS Distributions assert(req->tr_state == WORKQ_TR_STATE_NEW);
1579*19c3b8c2SApple OSS Distributions
1580*19c3b8c2SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_QUEUED;
1581*19c3b8c2SApple OSS Distributions wq->wq_reqcount += req->tr_count;
1582*19c3b8c2SApple OSS Distributions
1583*19c3b8c2SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
1584*19c3b8c2SApple OSS Distributions assert(wq->wq_event_manager_threadreq == NULL);
1585*19c3b8c2SApple OSS Distributions assert(req->tr_flags & WORKQ_TR_FLAG_KEVENT);
1586*19c3b8c2SApple OSS Distributions assert(req->tr_count == 1);
1587*19c3b8c2SApple OSS Distributions wq->wq_event_manager_threadreq = req;
1588*19c3b8c2SApple OSS Distributions return true;
1589*19c3b8c2SApple OSS Distributions }
1590*19c3b8c2SApple OSS Distributions
1591*19c3b8c2SApple OSS Distributions if (workq_threadreq_is_cooperative(req)) {
1592*19c3b8c2SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
1593*19c3b8c2SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
1594*19c3b8c2SApple OSS Distributions
1595*19c3b8c2SApple OSS Distributions struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
1596*19c3b8c2SApple OSS Distributions STAILQ_INSERT_TAIL(bucket, req, tr_link);
1597*19c3b8c2SApple OSS Distributions
1598*19c3b8c2SApple OSS Distributions return _wq_cooperative_queue_refresh_best_req_qos(wq);
1599*19c3b8c2SApple OSS Distributions }
1600*19c3b8c2SApple OSS Distributions
1601*19c3b8c2SApple OSS Distributions struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);
1602*19c3b8c2SApple OSS Distributions
1603*19c3b8c2SApple OSS Distributions priority_queue_entry_set_sched_pri(q, &req->tr_entry,
1604*19c3b8c2SApple OSS Distributions workq_priority_for_req(req), false);
1605*19c3b8c2SApple OSS Distributions
1606*19c3b8c2SApple OSS Distributions if (priority_queue_insert(q, &req->tr_entry)) {
1607*19c3b8c2SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
1608*19c3b8c2SApple OSS Distributions _wq_thactive_refresh_best_constrained_req_qos(wq);
1609*19c3b8c2SApple OSS Distributions }
1610*19c3b8c2SApple OSS Distributions return true;
1611*19c3b8c2SApple OSS Distributions }
1612*19c3b8c2SApple OSS Distributions return false;
1613*19c3b8c2SApple OSS Distributions }
1614*19c3b8c2SApple OSS Distributions
/*
 * Dequeues a thread request (decrementing its batch count) and returns true
 * if one of the following is true (so as to update creator if needed):
 *
 * (a) the next highest request of the pool we dequeued the request from changed
 * (b) the next highest requests of the pool the current thread used to be a
 * part of, changed
 *
 * For overcommit, special and constrained pools, the next highest QoS for each
 * pool is just a MAX of pending requests so tracking (a) is sufficient.
 *
 * But for the cooperative thread pool, the next highest QoS for the pool
 * depends on schedule counts in the pool as well. So if the current thread
 * used to be cooperative in its previous logical run i.e. (b), then that can
 * also affect the cooperative pool's next best QoS requests.
 */
static bool
workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req,
    bool cooperative_sched_count_changed)
{
	wq->wq_reqcount--;

	bool next_highest_request_changed = false;

	/*
	 * Requests can be batched (tr_count > 1); the request only leaves its
	 * queue once the whole batch has been consumed.
	 */
	if (--req->tr_count == 0) {
		if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
			assert(wq->wq_event_manager_threadreq == req);
			assert(req->tr_count == 0);
			wq->wq_event_manager_threadreq = NULL;

			/* If a cooperative thread was the one which picked up the manager
			 * thread request, we need to reevaluate the cooperative pool
			 * anyways.
			 */
			if (cooperative_sched_count_changed) {
				_wq_cooperative_queue_refresh_best_req_qos(wq);
			}
			/* The manager slot is a singleton: emptying it always changes the best. */
			return true;
		}

		if (workq_threadreq_is_cooperative(req)) {
			assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
			assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
			/* Account for the fact that BG and MT are coalesced when
			 * calculating best request for cooperative pool
			 */
			assert(_wq_bucket(req->tr_qos) == _wq_bucket(wq->wq_cooperative_queue_best_req_qos));

			struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
			__assert_only workq_threadreq_t head = STAILQ_FIRST(bucket);

			/* Cooperative buckets are FIFO; only the head can be dequeued. */
			assert(head == req);
			STAILQ_REMOVE_HEAD(bucket, tr_link);

			/*
			 * If the request we're dequeueing is cooperative, then the sched
			 * counts definitely changed.
			 */
			assert(cooperative_sched_count_changed);
		}

		/*
		 * We want to do the cooperative pool refresh after dequeueing a
		 * cooperative thread request if any (to combine both effects into 1
		 * refresh operation)
		 */
		if (cooperative_sched_count_changed) {
			next_highest_request_changed = _wq_cooperative_queue_refresh_best_req_qos(wq);
		}

		if (!workq_threadreq_is_cooperative(req)) {
			/*
			 * All other types of requests are enqueued in priority queues
			 */

			/* priority_queue_remove returns true when the removed entry was the head */
			if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
			    &req->tr_entry)) {
				next_highest_request_changed |= true;
				if (workq_threadreq_is_nonovercommit(req)) {
					_wq_thactive_refresh_best_constrained_req_qos(wq);
				}
			}
		}
	}

	return next_highest_request_changed;
}
1702*19c3b8c2SApple OSS Distributions
1703*19c3b8c2SApple OSS Distributions static void
workq_threadreq_destroy(proc_t p,workq_threadreq_t req)1704*19c3b8c2SApple OSS Distributions workq_threadreq_destroy(proc_t p, workq_threadreq_t req)
1705*19c3b8c2SApple OSS Distributions {
1706*19c3b8c2SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_CANCELED;
1707*19c3b8c2SApple OSS Distributions if (req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT)) {
1708*19c3b8c2SApple OSS Distributions kqueue_threadreq_cancel(p, req);
1709*19c3b8c2SApple OSS Distributions } else {
1710*19c3b8c2SApple OSS Distributions zfree(workq_zone_threadreq, req);
1711*19c3b8c2SApple OSS Distributions }
1712*19c3b8c2SApple OSS Distributions }
1713*19c3b8c2SApple OSS Distributions
1714*19c3b8c2SApple OSS Distributions #pragma mark workqueue thread creation thread calls
1715*19c3b8c2SApple OSS Distributions
/*
 * Atomically transitions wq_flags to note that a thread-creation thread call
 * is being scheduled (`sched`) — or, if the process is suspended, records it
 * as pending (`pend`) so workq_proc_resumed() can issue it later.
 *
 * Returns false without changing state when the workqueue is exiting, the
 * call is already scheduled/pended, or any bit of `fail_mask` is set.
 * Otherwise returns true iff the caller should actually schedule the call
 * now (i.e. the process was not suspended).
 */
static inline bool
workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend,
    uint32_t fail_mask)
{
	uint32_t old_flags, new_flags;

	os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, {
		if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) {
			os_atomic_rmw_loop_give_up(return false);
		}
		if (__improbable(old_flags & WQ_PROC_SUSPENDED)) {
			new_flags = old_flags | pend;
		} else {
			new_flags = old_flags | sched;
		}
	});

	return (old_flags & WQ_PROC_SUSPENDED) == 0;
}
1735*19c3b8c2SApple OSS Distributions
1736*19c3b8c2SApple OSS Distributions #define WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART 0x1
1737*19c3b8c2SApple OSS Distributions
1738*19c3b8c2SApple OSS Distributions static bool
workq_schedule_delayed_thread_creation(struct workqueue * wq,int flags)1739*19c3b8c2SApple OSS Distributions workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags)
1740*19c3b8c2SApple OSS Distributions {
1741*19c3b8c2SApple OSS Distributions assert(!preemption_enabled());
1742*19c3b8c2SApple OSS Distributions
1743*19c3b8c2SApple OSS Distributions if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED,
1744*19c3b8c2SApple OSS Distributions WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
1745*19c3b8c2SApple OSS Distributions WQ_IMMEDIATE_CALL_SCHEDULED)) {
1746*19c3b8c2SApple OSS Distributions return false;
1747*19c3b8c2SApple OSS Distributions }
1748*19c3b8c2SApple OSS Distributions
1749*19c3b8c2SApple OSS Distributions uint64_t now = mach_absolute_time();
1750*19c3b8c2SApple OSS Distributions
1751*19c3b8c2SApple OSS Distributions if (flags & WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART) {
1752*19c3b8c2SApple OSS Distributions /* do not change the window */
1753*19c3b8c2SApple OSS Distributions } else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
1754*19c3b8c2SApple OSS Distributions wq->wq_timer_interval *= 2;
1755*19c3b8c2SApple OSS Distributions if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
1756*19c3b8c2SApple OSS Distributions wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
1757*19c3b8c2SApple OSS Distributions }
1758*19c3b8c2SApple OSS Distributions } else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
1759*19c3b8c2SApple OSS Distributions wq->wq_timer_interval /= 2;
1760*19c3b8c2SApple OSS Distributions if (wq->wq_timer_interval < wq_stalled_window.abstime) {
1761*19c3b8c2SApple OSS Distributions wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
1762*19c3b8c2SApple OSS Distributions }
1763*19c3b8c2SApple OSS Distributions }
1764*19c3b8c2SApple OSS Distributions
1765*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
1766*19c3b8c2SApple OSS Distributions _wq_flags(wq), wq->wq_timer_interval);
1767*19c3b8c2SApple OSS Distributions
1768*19c3b8c2SApple OSS Distributions thread_call_t call = wq->wq_delayed_call;
1769*19c3b8c2SApple OSS Distributions uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED;
1770*19c3b8c2SApple OSS Distributions uint64_t deadline = now + wq->wq_timer_interval;
1771*19c3b8c2SApple OSS Distributions if (thread_call_enter1_delayed(call, (void *)arg, deadline)) {
1772*19c3b8c2SApple OSS Distributions panic("delayed_call was already enqueued");
1773*19c3b8c2SApple OSS Distributions }
1774*19c3b8c2SApple OSS Distributions return true;
1775*19c3b8c2SApple OSS Distributions }
1776*19c3b8c2SApple OSS Distributions
1777*19c3b8c2SApple OSS Distributions static void
workq_schedule_immediate_thread_creation(struct workqueue * wq)1778*19c3b8c2SApple OSS Distributions workq_schedule_immediate_thread_creation(struct workqueue *wq)
1779*19c3b8c2SApple OSS Distributions {
1780*19c3b8c2SApple OSS Distributions assert(!preemption_enabled());
1781*19c3b8c2SApple OSS Distributions
1782*19c3b8c2SApple OSS Distributions if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED,
1783*19c3b8c2SApple OSS Distributions WQ_IMMEDIATE_CALL_PENDED, 0)) {
1784*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
1785*19c3b8c2SApple OSS Distributions _wq_flags(wq), 0);
1786*19c3b8c2SApple OSS Distributions
1787*19c3b8c2SApple OSS Distributions uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED;
1788*19c3b8c2SApple OSS Distributions if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) {
1789*19c3b8c2SApple OSS Distributions panic("immediate_call was already enqueued");
1790*19c3b8c2SApple OSS Distributions }
1791*19c3b8c2SApple OSS Distributions }
1792*19c3b8c2SApple OSS Distributions }
1793*19c3b8c2SApple OSS Distributions
1794*19c3b8c2SApple OSS Distributions void
workq_proc_suspended(struct proc * p)1795*19c3b8c2SApple OSS Distributions workq_proc_suspended(struct proc *p)
1796*19c3b8c2SApple OSS Distributions {
1797*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
1798*19c3b8c2SApple OSS Distributions
1799*19c3b8c2SApple OSS Distributions if (wq) {
1800*19c3b8c2SApple OSS Distributions os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
1801*19c3b8c2SApple OSS Distributions }
1802*19c3b8c2SApple OSS Distributions }
1803*19c3b8c2SApple OSS Distributions
1804*19c3b8c2SApple OSS Distributions void
workq_proc_resumed(struct proc * p)1805*19c3b8c2SApple OSS Distributions workq_proc_resumed(struct proc *p)
1806*19c3b8c2SApple OSS Distributions {
1807*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
1808*19c3b8c2SApple OSS Distributions uint32_t wq_flags;
1809*19c3b8c2SApple OSS Distributions
1810*19c3b8c2SApple OSS Distributions if (!wq) {
1811*19c3b8c2SApple OSS Distributions return;
1812*19c3b8c2SApple OSS Distributions }
1813*19c3b8c2SApple OSS Distributions
1814*19c3b8c2SApple OSS Distributions wq_flags = os_atomic_andnot_orig(&wq->wq_flags, WQ_PROC_SUSPENDED |
1815*19c3b8c2SApple OSS Distributions WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED, relaxed);
1816*19c3b8c2SApple OSS Distributions if ((wq_flags & WQ_EXITING) == 0) {
1817*19c3b8c2SApple OSS Distributions disable_preemption();
1818*19c3b8c2SApple OSS Distributions if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) {
1819*19c3b8c2SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
1820*19c3b8c2SApple OSS Distributions } else if (wq_flags & WQ_DELAYED_CALL_PENDED) {
1821*19c3b8c2SApple OSS Distributions workq_schedule_delayed_thread_creation(wq,
1822*19c3b8c2SApple OSS Distributions WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
1823*19c3b8c2SApple OSS Distributions }
1824*19c3b8c2SApple OSS Distributions enable_preemption();
1825*19c3b8c2SApple OSS Distributions }
1826*19c3b8c2SApple OSS Distributions }
1827*19c3b8c2SApple OSS Distributions
1828*19c3b8c2SApple OSS Distributions /**
1829*19c3b8c2SApple OSS Distributions * returns whether lastblocked_tsp is within wq_stalled_window usecs of now
1830*19c3b8c2SApple OSS Distributions */
1831*19c3b8c2SApple OSS Distributions static bool
workq_thread_is_busy(uint64_t now,_Atomic uint64_t * lastblocked_tsp)1832*19c3b8c2SApple OSS Distributions workq_thread_is_busy(uint64_t now, _Atomic uint64_t *lastblocked_tsp)
1833*19c3b8c2SApple OSS Distributions {
1834*19c3b8c2SApple OSS Distributions uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed);
1835*19c3b8c2SApple OSS Distributions if (now <= lastblocked_ts) {
1836*19c3b8c2SApple OSS Distributions /*
1837*19c3b8c2SApple OSS Distributions * Because the update of the timestamp when a thread blocks
1838*19c3b8c2SApple OSS Distributions * isn't serialized against us looking at it (i.e. we don't hold
1839*19c3b8c2SApple OSS Distributions * the workq lock), it's possible to have a timestamp that matches
1840*19c3b8c2SApple OSS Distributions * the current time or that even looks to be in the future relative
1841*19c3b8c2SApple OSS Distributions * to when we grabbed the current time...
1842*19c3b8c2SApple OSS Distributions *
1843*19c3b8c2SApple OSS Distributions * Just treat this as a busy thread since it must have just blocked.
1844*19c3b8c2SApple OSS Distributions */
1845*19c3b8c2SApple OSS Distributions return true;
1846*19c3b8c2SApple OSS Distributions }
1847*19c3b8c2SApple OSS Distributions return (now - lastblocked_ts) < wq_stalled_window.abstime;
1848*19c3b8c2SApple OSS Distributions }
1849*19c3b8c2SApple OSS Distributions
1850*19c3b8c2SApple OSS Distributions static void
workq_add_new_threads_call(void * _p,void * flags)1851*19c3b8c2SApple OSS Distributions workq_add_new_threads_call(void *_p, void *flags)
1852*19c3b8c2SApple OSS Distributions {
1853*19c3b8c2SApple OSS Distributions proc_t p = _p;
1854*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
1855*19c3b8c2SApple OSS Distributions uint32_t my_flag = (uint32_t)(uintptr_t)flags;
1856*19c3b8c2SApple OSS Distributions
1857*19c3b8c2SApple OSS Distributions /*
1858*19c3b8c2SApple OSS Distributions * workq_exit() will set the workqueue to NULL before
1859*19c3b8c2SApple OSS Distributions * it cancels thread calls.
1860*19c3b8c2SApple OSS Distributions */
1861*19c3b8c2SApple OSS Distributions if (!wq) {
1862*19c3b8c2SApple OSS Distributions return;
1863*19c3b8c2SApple OSS Distributions }
1864*19c3b8c2SApple OSS Distributions
1865*19c3b8c2SApple OSS Distributions assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) ||
1866*19c3b8c2SApple OSS Distributions (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));
1867*19c3b8c2SApple OSS Distributions
1868*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq),
1869*19c3b8c2SApple OSS Distributions wq->wq_nthreads, wq->wq_thidlecount);
1870*19c3b8c2SApple OSS Distributions
1871*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
1872*19c3b8c2SApple OSS Distributions
1873*19c3b8c2SApple OSS Distributions wq->wq_thread_call_last_run = mach_absolute_time();
1874*19c3b8c2SApple OSS Distributions os_atomic_andnot(&wq->wq_flags, my_flag, release);
1875*19c3b8c2SApple OSS Distributions
1876*19c3b8c2SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
1877*19c3b8c2SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
1878*19c3b8c2SApple OSS Distributions
1879*19c3b8c2SApple OSS Distributions workq_unlock(wq);
1880*19c3b8c2SApple OSS Distributions
1881*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0,
1882*19c3b8c2SApple OSS Distributions wq->wq_nthreads, wq->wq_thidlecount);
1883*19c3b8c2SApple OSS Distributions }
1884*19c3b8c2SApple OSS Distributions
1885*19c3b8c2SApple OSS Distributions #pragma mark thread state tracking
1886*19c3b8c2SApple OSS Distributions
1887*19c3b8c2SApple OSS Distributions static void
workq_sched_callback(int type,thread_t thread)1888*19c3b8c2SApple OSS Distributions workq_sched_callback(int type, thread_t thread)
1889*19c3b8c2SApple OSS Distributions {
1890*19c3b8c2SApple OSS Distributions thread_ro_t tro = get_thread_ro(thread);
1891*19c3b8c2SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
1892*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(tro->tro_proc);
1893*19c3b8c2SApple OSS Distributions thread_qos_t req_qos, qos = uth->uu_workq_pri.qos_bucket;
1894*19c3b8c2SApple OSS Distributions wq_thactive_t old_thactive;
1895*19c3b8c2SApple OSS Distributions bool start_timer = false;
1896*19c3b8c2SApple OSS Distributions
1897*19c3b8c2SApple OSS Distributions if (qos == WORKQ_THREAD_QOS_MANAGER) {
1898*19c3b8c2SApple OSS Distributions return;
1899*19c3b8c2SApple OSS Distributions }
1900*19c3b8c2SApple OSS Distributions
1901*19c3b8c2SApple OSS Distributions switch (type) {
1902*19c3b8c2SApple OSS Distributions case SCHED_CALL_BLOCK:
1903*19c3b8c2SApple OSS Distributions old_thactive = _wq_thactive_dec(wq, qos);
1904*19c3b8c2SApple OSS Distributions req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
1905*19c3b8c2SApple OSS Distributions
1906*19c3b8c2SApple OSS Distributions /*
1907*19c3b8c2SApple OSS Distributions * Remember the timestamp of the last thread that blocked in this
1908*19c3b8c2SApple OSS Distributions * bucket, it used used by admission checks to ignore one thread
1909*19c3b8c2SApple OSS Distributions * being inactive if this timestamp is recent enough.
1910*19c3b8c2SApple OSS Distributions *
1911*19c3b8c2SApple OSS Distributions * If we collide with another thread trying to update the
1912*19c3b8c2SApple OSS Distributions * last_blocked (really unlikely since another thread would have to
1913*19c3b8c2SApple OSS Distributions * get scheduled and then block after we start down this path), it's
1914*19c3b8c2SApple OSS Distributions * not a problem. Either timestamp is adequate, so no need to retry
1915*19c3b8c2SApple OSS Distributions */
1916*19c3b8c2SApple OSS Distributions os_atomic_store_wide(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
1917*19c3b8c2SApple OSS Distributions thread_last_run_time(thread), relaxed);
1918*19c3b8c2SApple OSS Distributions
1919*19c3b8c2SApple OSS Distributions if (req_qos == THREAD_QOS_UNSPECIFIED) {
1920*19c3b8c2SApple OSS Distributions /*
1921*19c3b8c2SApple OSS Distributions * No pending request at the moment we could unblock, move on.
1922*19c3b8c2SApple OSS Distributions */
1923*19c3b8c2SApple OSS Distributions } else if (qos < req_qos) {
1924*19c3b8c2SApple OSS Distributions /*
1925*19c3b8c2SApple OSS Distributions * The blocking thread is at a lower QoS than the highest currently
1926*19c3b8c2SApple OSS Distributions * pending constrained request, nothing has to be redriven
1927*19c3b8c2SApple OSS Distributions */
1928*19c3b8c2SApple OSS Distributions } else {
1929*19c3b8c2SApple OSS Distributions uint32_t max_busycount, old_req_count;
1930*19c3b8c2SApple OSS Distributions old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
1931*19c3b8c2SApple OSS Distributions req_qos, NULL, &max_busycount);
1932*19c3b8c2SApple OSS Distributions /*
1933*19c3b8c2SApple OSS Distributions * If it is possible that may_start_constrained_thread had refused
1934*19c3b8c2SApple OSS Distributions * admission due to being over the max concurrency, we may need to
1935*19c3b8c2SApple OSS Distributions * spin up a new thread.
1936*19c3b8c2SApple OSS Distributions *
1937*19c3b8c2SApple OSS Distributions * We take into account the maximum number of busy threads
1938*19c3b8c2SApple OSS Distributions * that can affect may_start_constrained_thread as looking at the
1939*19c3b8c2SApple OSS Distributions * actual number may_start_constrained_thread will see is racy.
1940*19c3b8c2SApple OSS Distributions *
1941*19c3b8c2SApple OSS Distributions * IOW at NCPU = 4, for IN (req_qos = 1), if the old req count is
1942*19c3b8c2SApple OSS Distributions * between NCPU (4) and NCPU - 2 (2) we need to redrive.
1943*19c3b8c2SApple OSS Distributions */
1944*19c3b8c2SApple OSS Distributions uint32_t conc = wq_max_parallelism[_wq_bucket(qos)];
1945*19c3b8c2SApple OSS Distributions if (old_req_count <= conc && conc <= old_req_count + max_busycount) {
1946*19c3b8c2SApple OSS Distributions start_timer = workq_schedule_delayed_thread_creation(wq, 0);
1947*19c3b8c2SApple OSS Distributions }
1948*19c3b8c2SApple OSS Distributions }
1949*19c3b8c2SApple OSS Distributions if (__improbable(kdebug_enable)) {
1950*19c3b8c2SApple OSS Distributions __unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
1951*19c3b8c2SApple OSS Distributions old_thactive, qos, NULL, NULL);
1952*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
1953*19c3b8c2SApple OSS Distributions old - 1, qos | (req_qos << 8),
1954*19c3b8c2SApple OSS Distributions wq->wq_reqcount << 1 | start_timer);
1955*19c3b8c2SApple OSS Distributions }
1956*19c3b8c2SApple OSS Distributions break;
1957*19c3b8c2SApple OSS Distributions
1958*19c3b8c2SApple OSS Distributions case SCHED_CALL_UNBLOCK:
1959*19c3b8c2SApple OSS Distributions /*
1960*19c3b8c2SApple OSS Distributions * we cannot take the workqueue_lock here...
1961*19c3b8c2SApple OSS Distributions * an UNBLOCK can occur from a timer event which
1962*19c3b8c2SApple OSS Distributions * is run from an interrupt context... if the workqueue_lock
1963*19c3b8c2SApple OSS Distributions * is already held by this processor, we'll deadlock...
1964*19c3b8c2SApple OSS Distributions * the thread lock for the thread being UNBLOCKED
1965*19c3b8c2SApple OSS Distributions * is also held
1966*19c3b8c2SApple OSS Distributions */
1967*19c3b8c2SApple OSS Distributions old_thactive = _wq_thactive_inc(wq, qos);
1968*19c3b8c2SApple OSS Distributions if (__improbable(kdebug_enable)) {
1969*19c3b8c2SApple OSS Distributions __unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
1970*19c3b8c2SApple OSS Distributions old_thactive, qos, NULL, NULL);
1971*19c3b8c2SApple OSS Distributions req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
1972*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
1973*19c3b8c2SApple OSS Distributions old + 1, qos | (req_qos << 8),
1974*19c3b8c2SApple OSS Distributions wq->wq_threads_scheduled);
1975*19c3b8c2SApple OSS Distributions }
1976*19c3b8c2SApple OSS Distributions break;
1977*19c3b8c2SApple OSS Distributions }
1978*19c3b8c2SApple OSS Distributions }
1979*19c3b8c2SApple OSS Distributions
1980*19c3b8c2SApple OSS Distributions #pragma mark workq lifecycle
1981*19c3b8c2SApple OSS Distributions
1982*19c3b8c2SApple OSS Distributions void
workq_reference(struct workqueue * wq)1983*19c3b8c2SApple OSS Distributions workq_reference(struct workqueue *wq)
1984*19c3b8c2SApple OSS Distributions {
1985*19c3b8c2SApple OSS Distributions os_ref_retain(&wq->wq_refcnt);
1986*19c3b8c2SApple OSS Distributions }
1987*19c3b8c2SApple OSS Distributions
1988*19c3b8c2SApple OSS Distributions static void
workq_deallocate_queue_invoke(mpsc_queue_chain_t e,__assert_only mpsc_daemon_queue_t dq)1989*19c3b8c2SApple OSS Distributions workq_deallocate_queue_invoke(mpsc_queue_chain_t e,
1990*19c3b8c2SApple OSS Distributions __assert_only mpsc_daemon_queue_t dq)
1991*19c3b8c2SApple OSS Distributions {
1992*19c3b8c2SApple OSS Distributions struct workqueue *wq;
1993*19c3b8c2SApple OSS Distributions struct turnstile *ts;
1994*19c3b8c2SApple OSS Distributions
1995*19c3b8c2SApple OSS Distributions wq = mpsc_queue_element(e, struct workqueue, wq_destroy_link);
1996*19c3b8c2SApple OSS Distributions assert(dq == &workq_deallocate_queue);
1997*19c3b8c2SApple OSS Distributions
1998*19c3b8c2SApple OSS Distributions turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts, TURNSTILE_WORKQS);
1999*19c3b8c2SApple OSS Distributions assert(ts);
2000*19c3b8c2SApple OSS Distributions turnstile_cleanup();
2001*19c3b8c2SApple OSS Distributions turnstile_deallocate(ts);
2002*19c3b8c2SApple OSS Distributions
2003*19c3b8c2SApple OSS Distributions lck_ticket_destroy(&wq->wq_lock, &workq_lck_grp);
2004*19c3b8c2SApple OSS Distributions zfree(workq_zone_workqueue, wq);
2005*19c3b8c2SApple OSS Distributions }
2006*19c3b8c2SApple OSS Distributions
2007*19c3b8c2SApple OSS Distributions static void
workq_deallocate(struct workqueue * wq)2008*19c3b8c2SApple OSS Distributions workq_deallocate(struct workqueue *wq)
2009*19c3b8c2SApple OSS Distributions {
2010*19c3b8c2SApple OSS Distributions if (os_ref_release_relaxed(&wq->wq_refcnt) == 0) {
2011*19c3b8c2SApple OSS Distributions workq_deallocate_queue_invoke(&wq->wq_destroy_link,
2012*19c3b8c2SApple OSS Distributions &workq_deallocate_queue);
2013*19c3b8c2SApple OSS Distributions }
2014*19c3b8c2SApple OSS Distributions }
2015*19c3b8c2SApple OSS Distributions
2016*19c3b8c2SApple OSS Distributions void
workq_deallocate_safe(struct workqueue * wq)2017*19c3b8c2SApple OSS Distributions workq_deallocate_safe(struct workqueue *wq)
2018*19c3b8c2SApple OSS Distributions {
2019*19c3b8c2SApple OSS Distributions if (__improbable(os_ref_release_relaxed(&wq->wq_refcnt) == 0)) {
2020*19c3b8c2SApple OSS Distributions mpsc_daemon_enqueue(&workq_deallocate_queue, &wq->wq_destroy_link,
2021*19c3b8c2SApple OSS Distributions MPSC_QUEUE_DISABLE_PREEMPTION);
2022*19c3b8c2SApple OSS Distributions }
2023*19c3b8c2SApple OSS Distributions }
2024*19c3b8c2SApple OSS Distributions
2025*19c3b8c2SApple OSS Distributions /**
2026*19c3b8c2SApple OSS Distributions * Setup per-process state for the workqueue.
2027*19c3b8c2SApple OSS Distributions */
2028*19c3b8c2SApple OSS Distributions int
workq_open(struct proc * p,__unused struct workq_open_args * uap,__unused int32_t * retval)2029*19c3b8c2SApple OSS Distributions workq_open(struct proc *p, __unused struct workq_open_args *uap,
2030*19c3b8c2SApple OSS Distributions __unused int32_t *retval)
2031*19c3b8c2SApple OSS Distributions {
2032*19c3b8c2SApple OSS Distributions struct workqueue *wq;
2033*19c3b8c2SApple OSS Distributions int error = 0;
2034*19c3b8c2SApple OSS Distributions
2035*19c3b8c2SApple OSS Distributions if ((p->p_lflag & P_LREGISTER) == 0) {
2036*19c3b8c2SApple OSS Distributions return EINVAL;
2037*19c3b8c2SApple OSS Distributions }
2038*19c3b8c2SApple OSS Distributions
2039*19c3b8c2SApple OSS Distributions if (wq_init_constrained_limit) {
2040*19c3b8c2SApple OSS Distributions uint32_t limit, num_cpus = ml_wait_max_cpus();
2041*19c3b8c2SApple OSS Distributions
2042*19c3b8c2SApple OSS Distributions /*
2043*19c3b8c2SApple OSS Distributions * set up the limit for the constrained pool
2044*19c3b8c2SApple OSS Distributions * this is a virtual pool in that we don't
2045*19c3b8c2SApple OSS Distributions * maintain it on a separate idle and run list
2046*19c3b8c2SApple OSS Distributions */
2047*19c3b8c2SApple OSS Distributions limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR;
2048*19c3b8c2SApple OSS Distributions
2049*19c3b8c2SApple OSS Distributions if (limit > wq_max_constrained_threads) {
2050*19c3b8c2SApple OSS Distributions wq_max_constrained_threads = limit;
2051*19c3b8c2SApple OSS Distributions }
2052*19c3b8c2SApple OSS Distributions
2053*19c3b8c2SApple OSS Distributions if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
2054*19c3b8c2SApple OSS Distributions wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
2055*19c3b8c2SApple OSS Distributions }
2056*19c3b8c2SApple OSS Distributions if (wq_max_threads > CONFIG_THREAD_MAX - 20) {
2057*19c3b8c2SApple OSS Distributions wq_max_threads = CONFIG_THREAD_MAX - 20;
2058*19c3b8c2SApple OSS Distributions }
2059*19c3b8c2SApple OSS Distributions
2060*19c3b8c2SApple OSS Distributions wq_death_max_load = (uint16_t)fls(num_cpus) + 1;
2061*19c3b8c2SApple OSS Distributions
2062*19c3b8c2SApple OSS Distributions for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) {
2063*19c3b8c2SApple OSS Distributions wq_max_parallelism[_wq_bucket(qos)] =
2064*19c3b8c2SApple OSS Distributions qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
2065*19c3b8c2SApple OSS Distributions }
2066*19c3b8c2SApple OSS Distributions
2067*19c3b8c2SApple OSS Distributions wq_max_cooperative_threads = num_cpus;
2068*19c3b8c2SApple OSS Distributions
2069*19c3b8c2SApple OSS Distributions wq_init_constrained_limit = 0;
2070*19c3b8c2SApple OSS Distributions }
2071*19c3b8c2SApple OSS Distributions
2072*19c3b8c2SApple OSS Distributions if (proc_get_wqptr(p) == NULL) {
2073*19c3b8c2SApple OSS Distributions if (proc_init_wqptr_or_wait(p) == FALSE) {
2074*19c3b8c2SApple OSS Distributions assert(proc_get_wqptr(p) != NULL);
2075*19c3b8c2SApple OSS Distributions goto out;
2076*19c3b8c2SApple OSS Distributions }
2077*19c3b8c2SApple OSS Distributions
2078*19c3b8c2SApple OSS Distributions wq = zalloc_flags(workq_zone_workqueue, Z_WAITOK | Z_ZERO);
2079*19c3b8c2SApple OSS Distributions
2080*19c3b8c2SApple OSS Distributions os_ref_init_count(&wq->wq_refcnt, &workq_refgrp, 1);
2081*19c3b8c2SApple OSS Distributions
2082*19c3b8c2SApple OSS Distributions // Start the event manager at the priority hinted at by the policy engine
2083*19c3b8c2SApple OSS Distributions thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
2084*19c3b8c2SApple OSS Distributions pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
2085*19c3b8c2SApple OSS Distributions wq->wq_event_manager_priority = (uint32_t)pp;
2086*19c3b8c2SApple OSS Distributions wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
2087*19c3b8c2SApple OSS Distributions wq->wq_proc = p;
2088*19c3b8c2SApple OSS Distributions turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
2089*19c3b8c2SApple OSS Distributions TURNSTILE_WORKQS);
2090*19c3b8c2SApple OSS Distributions
2091*19c3b8c2SApple OSS Distributions TAILQ_INIT(&wq->wq_thrunlist);
2092*19c3b8c2SApple OSS Distributions TAILQ_INIT(&wq->wq_thnewlist);
2093*19c3b8c2SApple OSS Distributions TAILQ_INIT(&wq->wq_thidlelist);
2094*19c3b8c2SApple OSS Distributions priority_queue_init(&wq->wq_overcommit_queue);
2095*19c3b8c2SApple OSS Distributions priority_queue_init(&wq->wq_constrained_queue);
2096*19c3b8c2SApple OSS Distributions priority_queue_init(&wq->wq_special_queue);
2097*19c3b8c2SApple OSS Distributions for (int bucket = 0; bucket < WORKQ_NUM_QOS_BUCKETS; bucket++) {
2098*19c3b8c2SApple OSS Distributions STAILQ_INIT(&wq->wq_cooperative_queue[bucket]);
2099*19c3b8c2SApple OSS Distributions }
2100*19c3b8c2SApple OSS Distributions
2101*19c3b8c2SApple OSS Distributions /* We are only using the delayed thread call for the constrained pool
2102*19c3b8c2SApple OSS Distributions * which can't have work at >= UI QoS and so we can be fine with a
2103*19c3b8c2SApple OSS Distributions * UI QoS thread call.
2104*19c3b8c2SApple OSS Distributions */
2105*19c3b8c2SApple OSS Distributions wq->wq_delayed_call = thread_call_allocate_with_qos(
2106*19c3b8c2SApple OSS Distributions workq_add_new_threads_call, p, THREAD_QOS_USER_INTERACTIVE,
2107*19c3b8c2SApple OSS Distributions THREAD_CALL_OPTIONS_ONCE);
2108*19c3b8c2SApple OSS Distributions wq->wq_immediate_call = thread_call_allocate_with_options(
2109*19c3b8c2SApple OSS Distributions workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
2110*19c3b8c2SApple OSS Distributions THREAD_CALL_OPTIONS_ONCE);
2111*19c3b8c2SApple OSS Distributions wq->wq_death_call = thread_call_allocate_with_options(
2112*19c3b8c2SApple OSS Distributions workq_kill_old_threads_call, wq,
2113*19c3b8c2SApple OSS Distributions THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);
2114*19c3b8c2SApple OSS Distributions
2115*19c3b8c2SApple OSS Distributions lck_ticket_init(&wq->wq_lock, &workq_lck_grp);
2116*19c3b8c2SApple OSS Distributions
2117*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
2118*19c3b8c2SApple OSS Distributions VM_KERNEL_ADDRHIDE(wq), 0, 0);
2119*19c3b8c2SApple OSS Distributions proc_set_wqptr(p, wq);
2120*19c3b8c2SApple OSS Distributions }
2121*19c3b8c2SApple OSS Distributions out:
2122*19c3b8c2SApple OSS Distributions
2123*19c3b8c2SApple OSS Distributions return error;
2124*19c3b8c2SApple OSS Distributions }
2125*19c3b8c2SApple OSS Distributions
2126*19c3b8c2SApple OSS Distributions /*
2127*19c3b8c2SApple OSS Distributions * Routine: workq_mark_exiting
2128*19c3b8c2SApple OSS Distributions *
2129*19c3b8c2SApple OSS Distributions * Function: Mark the work queue such that new threads will not be added to the
2130*19c3b8c2SApple OSS Distributions * work queue after we return.
2131*19c3b8c2SApple OSS Distributions *
2132*19c3b8c2SApple OSS Distributions * Conditions: Called against the current process.
2133*19c3b8c2SApple OSS Distributions */
2134*19c3b8c2SApple OSS Distributions void
workq_mark_exiting(struct proc * p)2135*19c3b8c2SApple OSS Distributions workq_mark_exiting(struct proc *p)
2136*19c3b8c2SApple OSS Distributions {
2137*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2138*19c3b8c2SApple OSS Distributions uint32_t wq_flags;
2139*19c3b8c2SApple OSS Distributions workq_threadreq_t mgr_req;
2140*19c3b8c2SApple OSS Distributions
2141*19c3b8c2SApple OSS Distributions if (!wq) {
2142*19c3b8c2SApple OSS Distributions return;
2143*19c3b8c2SApple OSS Distributions }
2144*19c3b8c2SApple OSS Distributions
2145*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0);
2146*19c3b8c2SApple OSS Distributions
2147*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
2148*19c3b8c2SApple OSS Distributions
2149*19c3b8c2SApple OSS Distributions wq_flags = os_atomic_or_orig(&wq->wq_flags, WQ_EXITING, relaxed);
2150*19c3b8c2SApple OSS Distributions if (__improbable(wq_flags & WQ_EXITING)) {
2151*19c3b8c2SApple OSS Distributions panic("workq_mark_exiting called twice");
2152*19c3b8c2SApple OSS Distributions }
2153*19c3b8c2SApple OSS Distributions
2154*19c3b8c2SApple OSS Distributions /*
2155*19c3b8c2SApple OSS Distributions * Opportunistically try to cancel thread calls that are likely in flight.
2156*19c3b8c2SApple OSS Distributions * workq_exit() will do the proper cleanup.
2157*19c3b8c2SApple OSS Distributions */
2158*19c3b8c2SApple OSS Distributions if (wq_flags & WQ_IMMEDIATE_CALL_SCHEDULED) {
2159*19c3b8c2SApple OSS Distributions thread_call_cancel(wq->wq_immediate_call);
2160*19c3b8c2SApple OSS Distributions }
2161*19c3b8c2SApple OSS Distributions if (wq_flags & WQ_DELAYED_CALL_SCHEDULED) {
2162*19c3b8c2SApple OSS Distributions thread_call_cancel(wq->wq_delayed_call);
2163*19c3b8c2SApple OSS Distributions }
2164*19c3b8c2SApple OSS Distributions if (wq_flags & WQ_DEATH_CALL_SCHEDULED) {
2165*19c3b8c2SApple OSS Distributions thread_call_cancel(wq->wq_death_call);
2166*19c3b8c2SApple OSS Distributions }
2167*19c3b8c2SApple OSS Distributions
2168*19c3b8c2SApple OSS Distributions mgr_req = wq->wq_event_manager_threadreq;
2169*19c3b8c2SApple OSS Distributions wq->wq_event_manager_threadreq = NULL;
2170*19c3b8c2SApple OSS Distributions wq->wq_reqcount = 0; /* workq_schedule_creator must not look at queues */
2171*19c3b8c2SApple OSS Distributions wq->wq_creator = NULL;
2172*19c3b8c2SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
2173*19c3b8c2SApple OSS Distributions
2174*19c3b8c2SApple OSS Distributions workq_unlock(wq);
2175*19c3b8c2SApple OSS Distributions
2176*19c3b8c2SApple OSS Distributions if (mgr_req) {
2177*19c3b8c2SApple OSS Distributions kqueue_threadreq_cancel(p, mgr_req);
2178*19c3b8c2SApple OSS Distributions }
2179*19c3b8c2SApple OSS Distributions /*
2180*19c3b8c2SApple OSS Distributions * No one touches the priority queues once WQ_EXITING is set.
2181*19c3b8c2SApple OSS Distributions * It is hence safe to do the tear down without holding any lock.
2182*19c3b8c2SApple OSS Distributions */
2183*19c3b8c2SApple OSS Distributions priority_queue_destroy(&wq->wq_overcommit_queue,
2184*19c3b8c2SApple OSS Distributions struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2185*19c3b8c2SApple OSS Distributions workq_threadreq_destroy(p, e);
2186*19c3b8c2SApple OSS Distributions });
2187*19c3b8c2SApple OSS Distributions priority_queue_destroy(&wq->wq_constrained_queue,
2188*19c3b8c2SApple OSS Distributions struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2189*19c3b8c2SApple OSS Distributions workq_threadreq_destroy(p, e);
2190*19c3b8c2SApple OSS Distributions });
2191*19c3b8c2SApple OSS Distributions priority_queue_destroy(&wq->wq_special_queue,
2192*19c3b8c2SApple OSS Distributions struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2193*19c3b8c2SApple OSS Distributions workq_threadreq_destroy(p, e);
2194*19c3b8c2SApple OSS Distributions });
2195*19c3b8c2SApple OSS Distributions
2196*19c3b8c2SApple OSS Distributions WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0);
2197*19c3b8c2SApple OSS Distributions }
2198*19c3b8c2SApple OSS Distributions
2199*19c3b8c2SApple OSS Distributions /*
2200*19c3b8c2SApple OSS Distributions * Routine: workq_exit
2201*19c3b8c2SApple OSS Distributions *
2202*19c3b8c2SApple OSS Distributions * Function: clean up the work queue structure(s) now that there are no threads
2203*19c3b8c2SApple OSS Distributions * left running inside the work queue (except possibly current_thread).
2204*19c3b8c2SApple OSS Distributions *
2205*19c3b8c2SApple OSS Distributions * Conditions: Called by the last thread in the process.
2206*19c3b8c2SApple OSS Distributions * Called against current process.
2207*19c3b8c2SApple OSS Distributions */
2208*19c3b8c2SApple OSS Distributions void
workq_exit(struct proc * p)2209*19c3b8c2SApple OSS Distributions workq_exit(struct proc *p)
2210*19c3b8c2SApple OSS Distributions {
2211*19c3b8c2SApple OSS Distributions struct workqueue *wq;
2212*19c3b8c2SApple OSS Distributions struct uthread *uth, *tmp;
2213*19c3b8c2SApple OSS Distributions
2214*19c3b8c2SApple OSS Distributions wq = os_atomic_xchg(&p->p_wqptr, NULL, relaxed);
2215*19c3b8c2SApple OSS Distributions if (wq != NULL) {
2216*19c3b8c2SApple OSS Distributions thread_t th = current_thread();
2217*19c3b8c2SApple OSS Distributions
2218*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0);
2219*19c3b8c2SApple OSS Distributions
2220*19c3b8c2SApple OSS Distributions if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
2221*19c3b8c2SApple OSS Distributions /*
2222*19c3b8c2SApple OSS Distributions * <rdar://problem/40111515> Make sure we will no longer call the
2223*19c3b8c2SApple OSS Distributions * sched call, if we ever block this thread, which the cancel_wait
2224*19c3b8c2SApple OSS Distributions * below can do.
2225*19c3b8c2SApple OSS Distributions */
2226*19c3b8c2SApple OSS Distributions thread_sched_call(th, NULL);
2227*19c3b8c2SApple OSS Distributions }
2228*19c3b8c2SApple OSS Distributions
2229*19c3b8c2SApple OSS Distributions /*
2230*19c3b8c2SApple OSS Distributions * Thread calls are always scheduled by the proc itself or under the
2231*19c3b8c2SApple OSS Distributions * workqueue spinlock if WQ_EXITING is not yet set.
2232*19c3b8c2SApple OSS Distributions *
2233*19c3b8c2SApple OSS Distributions * Either way, when this runs, the proc has no threads left beside
2234*19c3b8c2SApple OSS Distributions * the one running this very code, so we know no thread call can be
2235*19c3b8c2SApple OSS Distributions * dispatched anymore.
2236*19c3b8c2SApple OSS Distributions */
2237*19c3b8c2SApple OSS Distributions thread_call_cancel_wait(wq->wq_delayed_call);
2238*19c3b8c2SApple OSS Distributions thread_call_cancel_wait(wq->wq_immediate_call);
2239*19c3b8c2SApple OSS Distributions thread_call_cancel_wait(wq->wq_death_call);
2240*19c3b8c2SApple OSS Distributions thread_call_free(wq->wq_delayed_call);
2241*19c3b8c2SApple OSS Distributions thread_call_free(wq->wq_immediate_call);
2242*19c3b8c2SApple OSS Distributions thread_call_free(wq->wq_death_call);
2243*19c3b8c2SApple OSS Distributions
2244*19c3b8c2SApple OSS Distributions /*
2245*19c3b8c2SApple OSS Distributions * Clean up workqueue data structures for threads that exited and
2246*19c3b8c2SApple OSS Distributions * didn't get a chance to clean up after themselves.
2247*19c3b8c2SApple OSS Distributions *
2248*19c3b8c2SApple OSS Distributions * idle/new threads should have been interrupted and died on their own
2249*19c3b8c2SApple OSS Distributions */
2250*19c3b8c2SApple OSS Distributions TAILQ_FOREACH_SAFE(uth, &wq->wq_thrunlist, uu_workq_entry, tmp) {
2251*19c3b8c2SApple OSS Distributions thread_t mth = get_machthread(uth);
2252*19c3b8c2SApple OSS Distributions thread_sched_call(mth, NULL);
2253*19c3b8c2SApple OSS Distributions thread_deallocate(mth);
2254*19c3b8c2SApple OSS Distributions }
2255*19c3b8c2SApple OSS Distributions assert(TAILQ_EMPTY(&wq->wq_thnewlist));
2256*19c3b8c2SApple OSS Distributions assert(TAILQ_EMPTY(&wq->wq_thidlelist));
2257*19c3b8c2SApple OSS Distributions
2258*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq,
2259*19c3b8c2SApple OSS Distributions VM_KERNEL_ADDRHIDE(wq), 0, 0);
2260*19c3b8c2SApple OSS Distributions
2261*19c3b8c2SApple OSS Distributions workq_deallocate(wq);
2262*19c3b8c2SApple OSS Distributions
2263*19c3b8c2SApple OSS Distributions WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0);
2264*19c3b8c2SApple OSS Distributions }
2265*19c3b8c2SApple OSS Distributions }
2266*19c3b8c2SApple OSS Distributions
2267*19c3b8c2SApple OSS Distributions
2268*19c3b8c2SApple OSS Distributions #pragma mark bsd thread control
2269*19c3b8c2SApple OSS Distributions
2270*19c3b8c2SApple OSS Distributions bool
bsdthread_part_of_cooperative_workqueue(struct uthread * uth)2271*19c3b8c2SApple OSS Distributions bsdthread_part_of_cooperative_workqueue(struct uthread *uth)
2272*19c3b8c2SApple OSS Distributions {
2273*19c3b8c2SApple OSS Distributions return (workq_thread_is_cooperative(uth) || workq_thread_is_nonovercommit(uth)) &&
2274*19c3b8c2SApple OSS Distributions (uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER);
2275*19c3b8c2SApple OSS Distributions }
2276*19c3b8c2SApple OSS Distributions
2277*19c3b8c2SApple OSS Distributions static bool
_pthread_priority_to_policy(pthread_priority_t priority,thread_qos_policy_data_t * data)2278*19c3b8c2SApple OSS Distributions _pthread_priority_to_policy(pthread_priority_t priority,
2279*19c3b8c2SApple OSS Distributions thread_qos_policy_data_t *data)
2280*19c3b8c2SApple OSS Distributions {
2281*19c3b8c2SApple OSS Distributions data->qos_tier = _pthread_priority_thread_qos(priority);
2282*19c3b8c2SApple OSS Distributions data->tier_importance = _pthread_priority_relpri(priority);
2283*19c3b8c2SApple OSS Distributions if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 ||
2284*19c3b8c2SApple OSS Distributions data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
2285*19c3b8c2SApple OSS Distributions return false;
2286*19c3b8c2SApple OSS Distributions }
2287*19c3b8c2SApple OSS Distributions return true;
2288*19c3b8c2SApple OSS Distributions }
2289*19c3b8c2SApple OSS Distributions
2290*19c3b8c2SApple OSS Distributions static int
bsdthread_set_self(proc_t p,thread_t th,pthread_priority_t priority,mach_port_name_t voucher,enum workq_set_self_flags flags)2291*19c3b8c2SApple OSS Distributions bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority,
2292*19c3b8c2SApple OSS Distributions mach_port_name_t voucher, enum workq_set_self_flags flags)
2293*19c3b8c2SApple OSS Distributions {
2294*19c3b8c2SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
2295*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2296*19c3b8c2SApple OSS Distributions
2297*19c3b8c2SApple OSS Distributions kern_return_t kr;
2298*19c3b8c2SApple OSS Distributions int unbind_rv = 0, qos_rv = 0, voucher_rv = 0, fixedpri_rv = 0;
2299*19c3b8c2SApple OSS Distributions bool is_wq_thread = (thread_get_tag(th) & THREAD_TAG_WORKQUEUE);
2300*19c3b8c2SApple OSS Distributions
2301*19c3b8c2SApple OSS Distributions assert(th == current_thread());
2302*19c3b8c2SApple OSS Distributions if (flags & WORKQ_SET_SELF_WQ_KEVENT_UNBIND) {
2303*19c3b8c2SApple OSS Distributions if (!is_wq_thread) {
2304*19c3b8c2SApple OSS Distributions unbind_rv = EINVAL;
2305*19c3b8c2SApple OSS Distributions goto qos;
2306*19c3b8c2SApple OSS Distributions }
2307*19c3b8c2SApple OSS Distributions
2308*19c3b8c2SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
2309*19c3b8c2SApple OSS Distributions unbind_rv = EINVAL;
2310*19c3b8c2SApple OSS Distributions goto qos;
2311*19c3b8c2SApple OSS Distributions }
2312*19c3b8c2SApple OSS Distributions
2313*19c3b8c2SApple OSS Distributions workq_threadreq_t kqr = uth->uu_kqr_bound;
2314*19c3b8c2SApple OSS Distributions if (kqr == NULL) {
2315*19c3b8c2SApple OSS Distributions unbind_rv = EALREADY;
2316*19c3b8c2SApple OSS Distributions goto qos;
2317*19c3b8c2SApple OSS Distributions }
2318*19c3b8c2SApple OSS Distributions
2319*19c3b8c2SApple OSS Distributions if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
2320*19c3b8c2SApple OSS Distributions unbind_rv = EINVAL;
2321*19c3b8c2SApple OSS Distributions goto qos;
2322*19c3b8c2SApple OSS Distributions }
2323*19c3b8c2SApple OSS Distributions
2324*19c3b8c2SApple OSS Distributions kqueue_threadreq_unbind(p, kqr);
2325*19c3b8c2SApple OSS Distributions }
2326*19c3b8c2SApple OSS Distributions
2327*19c3b8c2SApple OSS Distributions qos:
2328*19c3b8c2SApple OSS Distributions if (flags & (WORKQ_SET_SELF_QOS_FLAG | WORKQ_SET_SELF_QOS_OVERRIDE_FLAG)) {
2329*19c3b8c2SApple OSS Distributions assert(flags & WORKQ_SET_SELF_QOS_FLAG);
2330*19c3b8c2SApple OSS Distributions
2331*19c3b8c2SApple OSS Distributions thread_qos_policy_data_t new_policy;
2332*19c3b8c2SApple OSS Distributions thread_qos_t qos_override = THREAD_QOS_UNSPECIFIED;
2333*19c3b8c2SApple OSS Distributions
2334*19c3b8c2SApple OSS Distributions if (!_pthread_priority_to_policy(priority, &new_policy)) {
2335*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2336*19c3b8c2SApple OSS Distributions goto voucher;
2337*19c3b8c2SApple OSS Distributions }
2338*19c3b8c2SApple OSS Distributions
2339*19c3b8c2SApple OSS Distributions if (flags & WORKQ_SET_SELF_QOS_OVERRIDE_FLAG) {
2340*19c3b8c2SApple OSS Distributions /*
2341*19c3b8c2SApple OSS Distributions * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is set, we definitely
2342*19c3b8c2SApple OSS Distributions * should have an override QoS in the pthread_priority_t and we should
2343*19c3b8c2SApple OSS Distributions * only come into this path for cooperative thread requests
2344*19c3b8c2SApple OSS Distributions */
2345*19c3b8c2SApple OSS Distributions if (!_pthread_priority_has_override_qos(priority) ||
2346*19c3b8c2SApple OSS Distributions !_pthread_priority_is_cooperative(priority)) {
2347*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2348*19c3b8c2SApple OSS Distributions goto voucher;
2349*19c3b8c2SApple OSS Distributions }
2350*19c3b8c2SApple OSS Distributions qos_override = _pthread_priority_thread_override_qos(priority);
2351*19c3b8c2SApple OSS Distributions } else {
2352*19c3b8c2SApple OSS Distributions /*
2353*19c3b8c2SApple OSS Distributions * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is not set, we definitely
2354*19c3b8c2SApple OSS Distributions * should not have an override QoS in the pthread_priority_t
2355*19c3b8c2SApple OSS Distributions */
2356*19c3b8c2SApple OSS Distributions if (_pthread_priority_has_override_qos(priority)) {
2357*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2358*19c3b8c2SApple OSS Distributions goto voucher;
2359*19c3b8c2SApple OSS Distributions }
2360*19c3b8c2SApple OSS Distributions }
2361*19c3b8c2SApple OSS Distributions
2362*19c3b8c2SApple OSS Distributions if (!is_wq_thread) {
2363*19c3b8c2SApple OSS Distributions /*
2364*19c3b8c2SApple OSS Distributions * Threads opted out of QoS can't change QoS
2365*19c3b8c2SApple OSS Distributions */
2366*19c3b8c2SApple OSS Distributions if (!thread_has_qos_policy(th)) {
2367*19c3b8c2SApple OSS Distributions qos_rv = EPERM;
2368*19c3b8c2SApple OSS Distributions goto voucher;
2369*19c3b8c2SApple OSS Distributions }
2370*19c3b8c2SApple OSS Distributions } else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER ||
2371*19c3b8c2SApple OSS Distributions uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_ABOVEUI) {
2372*19c3b8c2SApple OSS Distributions /*
2373*19c3b8c2SApple OSS Distributions * Workqueue manager threads or threads above UI can't change QoS
2374*19c3b8c2SApple OSS Distributions */
2375*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2376*19c3b8c2SApple OSS Distributions goto voucher;
2377*19c3b8c2SApple OSS Distributions } else {
2378*19c3b8c2SApple OSS Distributions /*
2379*19c3b8c2SApple OSS Distributions * For workqueue threads, possibly adjust buckets and redrive thread
2380*19c3b8c2SApple OSS Distributions * requests.
2381*19c3b8c2SApple OSS Distributions *
2382*19c3b8c2SApple OSS Distributions * Transitions allowed:
2383*19c3b8c2SApple OSS Distributions *
2384*19c3b8c2SApple OSS Distributions * overcommit --> non-overcommit
2385*19c3b8c2SApple OSS Distributions * overcommit --> overcommit
2386*19c3b8c2SApple OSS Distributions * non-overcommit --> non-overcommit
2387*19c3b8c2SApple OSS Distributions * non-overcommit --> overcommit (to be deprecated later)
2388*19c3b8c2SApple OSS Distributions * cooperative --> cooperative
2389*19c3b8c2SApple OSS Distributions *
2390*19c3b8c2SApple OSS Distributions * All other transitions aren't allowed so reject them.
2391*19c3b8c2SApple OSS Distributions */
2392*19c3b8c2SApple OSS Distributions if (workq_thread_is_overcommit(uth) && _pthread_priority_is_cooperative(priority)) {
2393*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2394*19c3b8c2SApple OSS Distributions goto voucher;
2395*19c3b8c2SApple OSS Distributions } else if (workq_thread_is_cooperative(uth) && !_pthread_priority_is_cooperative(priority)) {
2396*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2397*19c3b8c2SApple OSS Distributions goto voucher;
2398*19c3b8c2SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_cooperative(priority)) {
2399*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2400*19c3b8c2SApple OSS Distributions goto voucher;
2401*19c3b8c2SApple OSS Distributions }
2402*19c3b8c2SApple OSS Distributions
2403*19c3b8c2SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
2404*19c3b8c2SApple OSS Distributions bool force_run = false;
2405*19c3b8c2SApple OSS Distributions
2406*19c3b8c2SApple OSS Distributions if (qos_override) {
2407*19c3b8c2SApple OSS Distributions /*
2408*19c3b8c2SApple OSS Distributions * We're in the case of a thread clarifying that it is for eg. not IN
2409*19c3b8c2SApple OSS Distributions * req QoS but rather, UT req QoS with IN override. However, this can
2410*19c3b8c2SApple OSS Distributions * race with a concurrent override happening to the thread via
2411*19c3b8c2SApple OSS Distributions * workq_thread_add_dispatch_override so this needs to be
2412*19c3b8c2SApple OSS Distributions * synchronized with the thread mutex.
2413*19c3b8c2SApple OSS Distributions */
2414*19c3b8c2SApple OSS Distributions thread_mtx_lock(th);
2415*19c3b8c2SApple OSS Distributions }
2416*19c3b8c2SApple OSS Distributions
2417*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
2418*19c3b8c2SApple OSS Distributions
2419*19c3b8c2SApple OSS Distributions old_pri = new_pri = uth->uu_workq_pri;
2420*19c3b8c2SApple OSS Distributions new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;
2421*19c3b8c2SApple OSS Distributions
2422*19c3b8c2SApple OSS Distributions if (old_pri.qos_override < qos_override) {
2423*19c3b8c2SApple OSS Distributions /*
2424*19c3b8c2SApple OSS Distributions * Since this can race with a concurrent override via
2425*19c3b8c2SApple OSS Distributions * workq_thread_add_dispatch_override, only adjust override value if we
2426*19c3b8c2SApple OSS Distributions * are higher - this is a saturating function.
2427*19c3b8c2SApple OSS Distributions *
2428*19c3b8c2SApple OSS Distributions * We should not be changing the final override values, we should simply
2429*19c3b8c2SApple OSS Distributions * be redistributing the current value with a different breakdown of req
2430*19c3b8c2SApple OSS Distributions * vs override QoS - assert to that effect. Therefore, buckets should
2431*19c3b8c2SApple OSS Distributions * not change.
2432*19c3b8c2SApple OSS Distributions */
2433*19c3b8c2SApple OSS Distributions new_pri.qos_override = qos_override;
2434*19c3b8c2SApple OSS Distributions assert(workq_pri_override(new_pri) == workq_pri_override(old_pri));
2435*19c3b8c2SApple OSS Distributions assert(workq_pri_bucket(new_pri) == workq_pri_bucket(old_pri));
2436*19c3b8c2SApple OSS Distributions }
2437*19c3b8c2SApple OSS Distributions
2438*19c3b8c2SApple OSS Distributions /* Adjust schedule counts for various types of transitions */
2439*19c3b8c2SApple OSS Distributions
2440*19c3b8c2SApple OSS Distributions /* overcommit -> non-overcommit */
2441*19c3b8c2SApple OSS Distributions if (workq_thread_is_overcommit(uth) && _pthread_priority_is_nonovercommit(priority)) {
2442*19c3b8c2SApple OSS Distributions workq_thread_set_type(uth, 0);
2443*19c3b8c2SApple OSS Distributions wq->wq_constrained_threads_scheduled++;
2444*19c3b8c2SApple OSS Distributions
2445*19c3b8c2SApple OSS Distributions /* non-overcommit -> overcommit */
2446*19c3b8c2SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_overcommit(priority)) {
2447*19c3b8c2SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
2448*19c3b8c2SApple OSS Distributions force_run = (wq->wq_constrained_threads_scheduled-- == wq_max_constrained_threads);
2449*19c3b8c2SApple OSS Distributions
2450*19c3b8c2SApple OSS Distributions /* cooperative -> cooperative */
2451*19c3b8c2SApple OSS Distributions } else if (workq_thread_is_cooperative(uth)) {
2452*19c3b8c2SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, old_pri.qos_req);
2453*19c3b8c2SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, new_pri.qos_req);
2454*19c3b8c2SApple OSS Distributions
2455*19c3b8c2SApple OSS Distributions /* We're changing schedule counts within cooperative pool, we
2456*19c3b8c2SApple OSS Distributions * need to refresh best cooperative QoS logic again */
2457*19c3b8c2SApple OSS Distributions force_run = _wq_cooperative_queue_refresh_best_req_qos(wq);
2458*19c3b8c2SApple OSS Distributions }
2459*19c3b8c2SApple OSS Distributions
2460*19c3b8c2SApple OSS Distributions /*
2461*19c3b8c2SApple OSS Distributions * This will set up an override on the thread if any and will also call
2462*19c3b8c2SApple OSS Distributions * schedule_creator if needed
2463*19c3b8c2SApple OSS Distributions */
2464*19c3b8c2SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
2465*19c3b8c2SApple OSS Distributions workq_unlock(wq);
2466*19c3b8c2SApple OSS Distributions
2467*19c3b8c2SApple OSS Distributions if (qos_override) {
2468*19c3b8c2SApple OSS Distributions thread_mtx_unlock(th);
2469*19c3b8c2SApple OSS Distributions }
2470*19c3b8c2SApple OSS Distributions
2471*19c3b8c2SApple OSS Distributions if (workq_thread_is_overcommit(uth)) {
2472*19c3b8c2SApple OSS Distributions thread_disarm_workqueue_quantum(th);
2473*19c3b8c2SApple OSS Distributions } else {
2474*19c3b8c2SApple OSS Distributions /* If the thread changed QoS buckets, the quantum duration
2475*19c3b8c2SApple OSS Distributions * may have changed too */
2476*19c3b8c2SApple OSS Distributions thread_arm_workqueue_quantum(th);
2477*19c3b8c2SApple OSS Distributions }
2478*19c3b8c2SApple OSS Distributions }
2479*19c3b8c2SApple OSS Distributions
2480*19c3b8c2SApple OSS Distributions kr = thread_policy_set_internal(th, THREAD_QOS_POLICY,
2481*19c3b8c2SApple OSS Distributions (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
2482*19c3b8c2SApple OSS Distributions if (kr != KERN_SUCCESS) {
2483*19c3b8c2SApple OSS Distributions qos_rv = EINVAL;
2484*19c3b8c2SApple OSS Distributions }
2485*19c3b8c2SApple OSS Distributions }
2486*19c3b8c2SApple OSS Distributions
2487*19c3b8c2SApple OSS Distributions voucher:
2488*19c3b8c2SApple OSS Distributions if (flags & WORKQ_SET_SELF_VOUCHER_FLAG) {
2489*19c3b8c2SApple OSS Distributions kr = thread_set_voucher_name(voucher);
2490*19c3b8c2SApple OSS Distributions if (kr != KERN_SUCCESS) {
2491*19c3b8c2SApple OSS Distributions voucher_rv = ENOENT;
2492*19c3b8c2SApple OSS Distributions goto fixedpri;
2493*19c3b8c2SApple OSS Distributions }
2494*19c3b8c2SApple OSS Distributions }
2495*19c3b8c2SApple OSS Distributions
2496*19c3b8c2SApple OSS Distributions fixedpri:
2497*19c3b8c2SApple OSS Distributions if (qos_rv) {
2498*19c3b8c2SApple OSS Distributions goto done;
2499*19c3b8c2SApple OSS Distributions }
2500*19c3b8c2SApple OSS Distributions if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) {
2501*19c3b8c2SApple OSS Distributions thread_extended_policy_data_t extpol = {.timeshare = 0};
2502*19c3b8c2SApple OSS Distributions
2503*19c3b8c2SApple OSS Distributions if (is_wq_thread) {
2504*19c3b8c2SApple OSS Distributions /* Not allowed on workqueue threads */
2505*19c3b8c2SApple OSS Distributions fixedpri_rv = ENOTSUP;
2506*19c3b8c2SApple OSS Distributions goto done;
2507*19c3b8c2SApple OSS Distributions }
2508*19c3b8c2SApple OSS Distributions
2509*19c3b8c2SApple OSS Distributions kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
2510*19c3b8c2SApple OSS Distributions (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
2511*19c3b8c2SApple OSS Distributions if (kr != KERN_SUCCESS) {
2512*19c3b8c2SApple OSS Distributions fixedpri_rv = EINVAL;
2513*19c3b8c2SApple OSS Distributions goto done;
2514*19c3b8c2SApple OSS Distributions }
2515*19c3b8c2SApple OSS Distributions } else if (flags & WORKQ_SET_SELF_TIMESHARE_FLAG) {
2516*19c3b8c2SApple OSS Distributions thread_extended_policy_data_t extpol = {.timeshare = 1};
2517*19c3b8c2SApple OSS Distributions
2518*19c3b8c2SApple OSS Distributions if (is_wq_thread) {
2519*19c3b8c2SApple OSS Distributions /* Not allowed on workqueue threads */
2520*19c3b8c2SApple OSS Distributions fixedpri_rv = ENOTSUP;
2521*19c3b8c2SApple OSS Distributions goto done;
2522*19c3b8c2SApple OSS Distributions }
2523*19c3b8c2SApple OSS Distributions
2524*19c3b8c2SApple OSS Distributions kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
2525*19c3b8c2SApple OSS Distributions (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
2526*19c3b8c2SApple OSS Distributions if (kr != KERN_SUCCESS) {
2527*19c3b8c2SApple OSS Distributions fixedpri_rv = EINVAL;
2528*19c3b8c2SApple OSS Distributions goto done;
2529*19c3b8c2SApple OSS Distributions }
2530*19c3b8c2SApple OSS Distributions }
2531*19c3b8c2SApple OSS Distributions
2532*19c3b8c2SApple OSS Distributions done:
2533*19c3b8c2SApple OSS Distributions if (qos_rv && voucher_rv) {
2534*19c3b8c2SApple OSS Distributions /* Both failed, give that a unique error. */
2535*19c3b8c2SApple OSS Distributions return EBADMSG;
2536*19c3b8c2SApple OSS Distributions }
2537*19c3b8c2SApple OSS Distributions
2538*19c3b8c2SApple OSS Distributions if (unbind_rv) {
2539*19c3b8c2SApple OSS Distributions return unbind_rv;
2540*19c3b8c2SApple OSS Distributions }
2541*19c3b8c2SApple OSS Distributions
2542*19c3b8c2SApple OSS Distributions if (qos_rv) {
2543*19c3b8c2SApple OSS Distributions return qos_rv;
2544*19c3b8c2SApple OSS Distributions }
2545*19c3b8c2SApple OSS Distributions
2546*19c3b8c2SApple OSS Distributions if (voucher_rv) {
2547*19c3b8c2SApple OSS Distributions return voucher_rv;
2548*19c3b8c2SApple OSS Distributions }
2549*19c3b8c2SApple OSS Distributions
2550*19c3b8c2SApple OSS Distributions if (fixedpri_rv) {
2551*19c3b8c2SApple OSS Distributions return fixedpri_rv;
2552*19c3b8c2SApple OSS Distributions }
2553*19c3b8c2SApple OSS Distributions
2554*19c3b8c2SApple OSS Distributions
2555*19c3b8c2SApple OSS Distributions return 0;
2556*19c3b8c2SApple OSS Distributions }
2557*19c3b8c2SApple OSS Distributions
2558*19c3b8c2SApple OSS Distributions static int
bsdthread_add_explicit_override(proc_t p,mach_port_name_t kport,pthread_priority_t pp,user_addr_t resource)2559*19c3b8c2SApple OSS Distributions bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport,
2560*19c3b8c2SApple OSS Distributions pthread_priority_t pp, user_addr_t resource)
2561*19c3b8c2SApple OSS Distributions {
2562*19c3b8c2SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pp);
2563*19c3b8c2SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
2564*19c3b8c2SApple OSS Distributions return EINVAL;
2565*19c3b8c2SApple OSS Distributions }
2566*19c3b8c2SApple OSS Distributions
2567*19c3b8c2SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2568*19c3b8c2SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2569*19c3b8c2SApple OSS Distributions if (th == THREAD_NULL) {
2570*19c3b8c2SApple OSS Distributions return ESRCH;
2571*19c3b8c2SApple OSS Distributions }
2572*19c3b8c2SApple OSS Distributions
2573*19c3b8c2SApple OSS Distributions int rv = proc_thread_qos_add_override(proc_task(p), th, 0, qos, TRUE,
2574*19c3b8c2SApple OSS Distributions resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2575*19c3b8c2SApple OSS Distributions
2576*19c3b8c2SApple OSS Distributions thread_deallocate(th);
2577*19c3b8c2SApple OSS Distributions return rv;
2578*19c3b8c2SApple OSS Distributions }
2579*19c3b8c2SApple OSS Distributions
2580*19c3b8c2SApple OSS Distributions static int
bsdthread_remove_explicit_override(proc_t p,mach_port_name_t kport,user_addr_t resource)2581*19c3b8c2SApple OSS Distributions bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport,
2582*19c3b8c2SApple OSS Distributions user_addr_t resource)
2583*19c3b8c2SApple OSS Distributions {
2584*19c3b8c2SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2585*19c3b8c2SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2586*19c3b8c2SApple OSS Distributions if (th == THREAD_NULL) {
2587*19c3b8c2SApple OSS Distributions return ESRCH;
2588*19c3b8c2SApple OSS Distributions }
2589*19c3b8c2SApple OSS Distributions
2590*19c3b8c2SApple OSS Distributions int rv = proc_thread_qos_remove_override(proc_task(p), th, 0, resource,
2591*19c3b8c2SApple OSS Distributions THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2592*19c3b8c2SApple OSS Distributions
2593*19c3b8c2SApple OSS Distributions thread_deallocate(th);
2594*19c3b8c2SApple OSS Distributions return rv;
2595*19c3b8c2SApple OSS Distributions }
2596*19c3b8c2SApple OSS Distributions
2597*19c3b8c2SApple OSS Distributions static int
workq_thread_add_dispatch_override(proc_t p,mach_port_name_t kport,pthread_priority_t pp,user_addr_t ulock_addr)2598*19c3b8c2SApple OSS Distributions workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport,
2599*19c3b8c2SApple OSS Distributions pthread_priority_t pp, user_addr_t ulock_addr)
2600*19c3b8c2SApple OSS Distributions {
2601*19c3b8c2SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
2602*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2603*19c3b8c2SApple OSS Distributions
2604*19c3b8c2SApple OSS Distributions thread_qos_t qos_override = _pthread_priority_thread_qos(pp);
2605*19c3b8c2SApple OSS Distributions if (qos_override == THREAD_QOS_UNSPECIFIED) {
2606*19c3b8c2SApple OSS Distributions return EINVAL;
2607*19c3b8c2SApple OSS Distributions }
2608*19c3b8c2SApple OSS Distributions
2609*19c3b8c2SApple OSS Distributions thread_t thread = port_name_to_thread(kport,
2610*19c3b8c2SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2611*19c3b8c2SApple OSS Distributions if (thread == THREAD_NULL) {
2612*19c3b8c2SApple OSS Distributions return ESRCH;
2613*19c3b8c2SApple OSS Distributions }
2614*19c3b8c2SApple OSS Distributions
2615*19c3b8c2SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2616*19c3b8c2SApple OSS Distributions if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
2617*19c3b8c2SApple OSS Distributions thread_deallocate(thread);
2618*19c3b8c2SApple OSS Distributions return EPERM;
2619*19c3b8c2SApple OSS Distributions }
2620*19c3b8c2SApple OSS Distributions
2621*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE,
2622*19c3b8c2SApple OSS Distributions wq, thread_tid(thread), 1, pp);
2623*19c3b8c2SApple OSS Distributions
2624*19c3b8c2SApple OSS Distributions thread_mtx_lock(thread);
2625*19c3b8c2SApple OSS Distributions
2626*19c3b8c2SApple OSS Distributions if (ulock_addr) {
2627*19c3b8c2SApple OSS Distributions uint32_t val;
2628*19c3b8c2SApple OSS Distributions int rc;
2629*19c3b8c2SApple OSS Distributions /*
2630*19c3b8c2SApple OSS Distributions * Workaround lack of explicit support for 'no-fault copyin'
2631*19c3b8c2SApple OSS Distributions * <rdar://problem/24999882>, as disabling preemption prevents paging in
2632*19c3b8c2SApple OSS Distributions */
2633*19c3b8c2SApple OSS Distributions disable_preemption();
2634*19c3b8c2SApple OSS Distributions rc = copyin_atomic32(ulock_addr, &val);
2635*19c3b8c2SApple OSS Distributions enable_preemption();
2636*19c3b8c2SApple OSS Distributions if (rc == 0 && ulock_owner_value_to_port_name(val) != kport) {
2637*19c3b8c2SApple OSS Distributions goto out;
2638*19c3b8c2SApple OSS Distributions }
2639*19c3b8c2SApple OSS Distributions }
2640*19c3b8c2SApple OSS Distributions
2641*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
2642*19c3b8c2SApple OSS Distributions
2643*19c3b8c2SApple OSS Distributions old_pri = uth->uu_workq_pri;
2644*19c3b8c2SApple OSS Distributions if (old_pri.qos_override >= qos_override) {
2645*19c3b8c2SApple OSS Distributions /* Nothing to do */
2646*19c3b8c2SApple OSS Distributions } else if (thread == current_thread()) {
2647*19c3b8c2SApple OSS Distributions new_pri = old_pri;
2648*19c3b8c2SApple OSS Distributions new_pri.qos_override = qos_override;
2649*19c3b8c2SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
2650*19c3b8c2SApple OSS Distributions } else {
2651*19c3b8c2SApple OSS Distributions uth->uu_workq_pri.qos_override = qos_override;
2652*19c3b8c2SApple OSS Distributions if (qos_override > workq_pri_override(old_pri)) {
2653*19c3b8c2SApple OSS Distributions thread_set_workq_override(thread, qos_override);
2654*19c3b8c2SApple OSS Distributions }
2655*19c3b8c2SApple OSS Distributions }
2656*19c3b8c2SApple OSS Distributions
2657*19c3b8c2SApple OSS Distributions workq_unlock(wq);
2658*19c3b8c2SApple OSS Distributions
2659*19c3b8c2SApple OSS Distributions out:
2660*19c3b8c2SApple OSS Distributions thread_mtx_unlock(thread);
2661*19c3b8c2SApple OSS Distributions thread_deallocate(thread);
2662*19c3b8c2SApple OSS Distributions return 0;
2663*19c3b8c2SApple OSS Distributions }
2664*19c3b8c2SApple OSS Distributions
2665*19c3b8c2SApple OSS Distributions static int
workq_thread_reset_dispatch_override(proc_t p,thread_t thread)2666*19c3b8c2SApple OSS Distributions workq_thread_reset_dispatch_override(proc_t p, thread_t thread)
2667*19c3b8c2SApple OSS Distributions {
2668*19c3b8c2SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
2669*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2670*19c3b8c2SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2671*19c3b8c2SApple OSS Distributions
2672*19c3b8c2SApple OSS Distributions if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
2673*19c3b8c2SApple OSS Distributions return EPERM;
2674*19c3b8c2SApple OSS Distributions }
2675*19c3b8c2SApple OSS Distributions
2676*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_NONE, wq, 0, 0, 0);
2677*19c3b8c2SApple OSS Distributions
2678*19c3b8c2SApple OSS Distributions /*
2679*19c3b8c2SApple OSS Distributions * workq_thread_add_dispatch_override takes the thread mutex before doing the
2680*19c3b8c2SApple OSS Distributions * copyin to validate the drainer and apply the override. We need to do the
2681*19c3b8c2SApple OSS Distributions * same here. See rdar://84472518
2682*19c3b8c2SApple OSS Distributions */
2683*19c3b8c2SApple OSS Distributions thread_mtx_lock(thread);
2684*19c3b8c2SApple OSS Distributions
2685*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
2686*19c3b8c2SApple OSS Distributions old_pri = new_pri = uth->uu_workq_pri;
2687*19c3b8c2SApple OSS Distributions new_pri.qos_override = THREAD_QOS_UNSPECIFIED;
2688*19c3b8c2SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
2689*19c3b8c2SApple OSS Distributions workq_unlock(wq);
2690*19c3b8c2SApple OSS Distributions
2691*19c3b8c2SApple OSS Distributions thread_mtx_unlock(thread);
2692*19c3b8c2SApple OSS Distributions return 0;
2693*19c3b8c2SApple OSS Distributions }
2694*19c3b8c2SApple OSS Distributions
2695*19c3b8c2SApple OSS Distributions static int
workq_thread_allow_kill(__unused proc_t p,thread_t thread,bool enable)2696*19c3b8c2SApple OSS Distributions workq_thread_allow_kill(__unused proc_t p, thread_t thread, bool enable)
2697*19c3b8c2SApple OSS Distributions {
2698*19c3b8c2SApple OSS Distributions if (!(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE)) {
2699*19c3b8c2SApple OSS Distributions // If the thread isn't a workqueue thread, don't set the
2700*19c3b8c2SApple OSS Distributions // kill_allowed bit; however, we still need to return 0
2701*19c3b8c2SApple OSS Distributions // instead of an error code since this code is executed
2702*19c3b8c2SApple OSS Distributions // on the abort path which needs to not depend on the
2703*19c3b8c2SApple OSS Distributions // pthread_t (returning an error depends on pthread_t via
2704*19c3b8c2SApple OSS Distributions // cerror_nocancel)
2705*19c3b8c2SApple OSS Distributions return 0;
2706*19c3b8c2SApple OSS Distributions }
2707*19c3b8c2SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2708*19c3b8c2SApple OSS Distributions uth->uu_workq_pthread_kill_allowed = enable;
2709*19c3b8c2SApple OSS Distributions return 0;
2710*19c3b8c2SApple OSS Distributions }
2711*19c3b8c2SApple OSS Distributions
2712*19c3b8c2SApple OSS Distributions static int
bsdthread_get_max_parallelism(thread_qos_t qos,unsigned long flags,int * retval)2713*19c3b8c2SApple OSS Distributions bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags,
2714*19c3b8c2SApple OSS Distributions int *retval)
2715*19c3b8c2SApple OSS Distributions {
2716*19c3b8c2SApple OSS Distributions static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
2717*19c3b8c2SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
2718*19c3b8c2SApple OSS Distributions static_assert(QOS_PARALLELISM_REALTIME ==
2719*19c3b8c2SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
2720*19c3b8c2SApple OSS Distributions static_assert(QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE ==
2721*19c3b8c2SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC, "cluster shared resource");
2722*19c3b8c2SApple OSS Distributions
2723*19c3b8c2SApple OSS Distributions if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL | QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE)) {
2724*19c3b8c2SApple OSS Distributions return EINVAL;
2725*19c3b8c2SApple OSS Distributions }
2726*19c3b8c2SApple OSS Distributions
2727*19c3b8c2SApple OSS Distributions /* No units are present */
2728*19c3b8c2SApple OSS Distributions if (flags & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) {
2729*19c3b8c2SApple OSS Distributions return ENOTSUP;
2730*19c3b8c2SApple OSS Distributions }
2731*19c3b8c2SApple OSS Distributions
2732*19c3b8c2SApple OSS Distributions if (flags & QOS_PARALLELISM_REALTIME) {
2733*19c3b8c2SApple OSS Distributions if (qos) {
2734*19c3b8c2SApple OSS Distributions return EINVAL;
2735*19c3b8c2SApple OSS Distributions }
2736*19c3b8c2SApple OSS Distributions } else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
2737*19c3b8c2SApple OSS Distributions return EINVAL;
2738*19c3b8c2SApple OSS Distributions }
2739*19c3b8c2SApple OSS Distributions
2740*19c3b8c2SApple OSS Distributions *retval = qos_max_parallelism(qos, flags);
2741*19c3b8c2SApple OSS Distributions return 0;
2742*19c3b8c2SApple OSS Distributions }
2743*19c3b8c2SApple OSS Distributions
2744*19c3b8c2SApple OSS Distributions static int
bsdthread_dispatch_apply_attr(__unused struct proc * p,thread_t thread,unsigned long flags,uint64_t value1,__unused uint64_t value2)2745*19c3b8c2SApple OSS Distributions bsdthread_dispatch_apply_attr(__unused struct proc *p, thread_t thread,
2746*19c3b8c2SApple OSS Distributions unsigned long flags, uint64_t value1, __unused uint64_t value2)
2747*19c3b8c2SApple OSS Distributions {
2748*19c3b8c2SApple OSS Distributions uint32_t apply_worker_index;
2749*19c3b8c2SApple OSS Distributions kern_return_t kr;
2750*19c3b8c2SApple OSS Distributions
2751*19c3b8c2SApple OSS Distributions switch (flags) {
2752*19c3b8c2SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET:
2753*19c3b8c2SApple OSS Distributions apply_worker_index = (uint32_t)value1;
2754*19c3b8c2SApple OSS Distributions kr = thread_shared_rsrc_policy_set(thread, apply_worker_index, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2755*19c3b8c2SApple OSS Distributions /*
2756*19c3b8c2SApple OSS Distributions * KERN_INVALID_POLICY indicates that the thread was trying to bind to a
2757*19c3b8c2SApple OSS Distributions * cluster which it was not eligible to execute on.
2758*19c3b8c2SApple OSS Distributions */
2759*19c3b8c2SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : ((kr == KERN_INVALID_POLICY) ? ENOTSUP : EINVAL);
2760*19c3b8c2SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR:
2761*19c3b8c2SApple OSS Distributions kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2762*19c3b8c2SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : EINVAL;
2763*19c3b8c2SApple OSS Distributions default:
2764*19c3b8c2SApple OSS Distributions return EINVAL;
2765*19c3b8c2SApple OSS Distributions }
2766*19c3b8c2SApple OSS Distributions }
2767*19c3b8c2SApple OSS Distributions
2768*19c3b8c2SApple OSS Distributions #define ENSURE_UNUSED(arg) \
2769*19c3b8c2SApple OSS Distributions ({ if ((arg) != 0) { return EINVAL; } })
2770*19c3b8c2SApple OSS Distributions
2771*19c3b8c2SApple OSS Distributions int
bsdthread_ctl(struct proc * p,struct bsdthread_ctl_args * uap,int * retval)2772*19c3b8c2SApple OSS Distributions bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
2773*19c3b8c2SApple OSS Distributions {
2774*19c3b8c2SApple OSS Distributions switch (uap->cmd) {
2775*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_START:
2776*19c3b8c2SApple OSS Distributions return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1,
2777*19c3b8c2SApple OSS Distributions (pthread_priority_t)uap->arg2, uap->arg3);
2778*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_END:
2779*19c3b8c2SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2780*19c3b8c2SApple OSS Distributions return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1,
2781*19c3b8c2SApple OSS Distributions (user_addr_t)uap->arg2);
2782*19c3b8c2SApple OSS Distributions
2783*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
2784*19c3b8c2SApple OSS Distributions return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1,
2785*19c3b8c2SApple OSS Distributions (pthread_priority_t)uap->arg2, uap->arg3);
2786*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
2787*19c3b8c2SApple OSS Distributions return workq_thread_reset_dispatch_override(p, current_thread());
2788*19c3b8c2SApple OSS Distributions
2789*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_SET_SELF:
2790*19c3b8c2SApple OSS Distributions return bsdthread_set_self(p, current_thread(),
2791*19c3b8c2SApple OSS Distributions (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
2792*19c3b8c2SApple OSS Distributions (enum workq_set_self_flags)uap->arg3);
2793*19c3b8c2SApple OSS Distributions
2794*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
2795*19c3b8c2SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2796*19c3b8c2SApple OSS Distributions return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1,
2797*19c3b8c2SApple OSS Distributions (unsigned long)uap->arg2, retval);
2798*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_WORKQ_ALLOW_KILL:
2799*19c3b8c2SApple OSS Distributions ENSURE_UNUSED(uap->arg2);
2800*19c3b8c2SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2801*19c3b8c2SApple OSS Distributions return workq_thread_allow_kill(p, current_thread(), (bool)uap->arg1);
2802*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_DISPATCH_APPLY_ATTR:
2803*19c3b8c2SApple OSS Distributions return bsdthread_dispatch_apply_attr(p, current_thread(),
2804*19c3b8c2SApple OSS Distributions (unsigned long)uap->arg1, (uint64_t)uap->arg2,
2805*19c3b8c2SApple OSS Distributions (uint64_t)uap->arg3);
2806*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_SET_QOS:
2807*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
2808*19c3b8c2SApple OSS Distributions case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
2809*19c3b8c2SApple OSS Distributions /* no longer supported */
2810*19c3b8c2SApple OSS Distributions return ENOTSUP;
2811*19c3b8c2SApple OSS Distributions
2812*19c3b8c2SApple OSS Distributions default:
2813*19c3b8c2SApple OSS Distributions return EINVAL;
2814*19c3b8c2SApple OSS Distributions }
2815*19c3b8c2SApple OSS Distributions }
2816*19c3b8c2SApple OSS Distributions
2817*19c3b8c2SApple OSS Distributions #pragma mark workqueue thread manipulation
2818*19c3b8c2SApple OSS Distributions
2819*19c3b8c2SApple OSS Distributions static void __dead2
2820*19c3b8c2SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2821*19c3b8c2SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2822*19c3b8c2SApple OSS Distributions
2823*19c3b8c2SApple OSS Distributions static void __dead2
2824*19c3b8c2SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2825*19c3b8c2SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2826*19c3b8c2SApple OSS Distributions
2827*19c3b8c2SApple OSS Distributions static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2;
2828*19c3b8c2SApple OSS Distributions
2829*19c3b8c2SApple OSS Distributions #if KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD
2830*19c3b8c2SApple OSS Distributions static inline uint64_t
workq_trace_req_id(workq_threadreq_t req)2831*19c3b8c2SApple OSS Distributions workq_trace_req_id(workq_threadreq_t req)
2832*19c3b8c2SApple OSS Distributions {
2833*19c3b8c2SApple OSS Distributions struct kqworkloop *kqwl;
2834*19c3b8c2SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
2835*19c3b8c2SApple OSS Distributions kqwl = __container_of(req, struct kqworkloop, kqwl_request);
2836*19c3b8c2SApple OSS Distributions return kqwl->kqwl_dynamicid;
2837*19c3b8c2SApple OSS Distributions }
2838*19c3b8c2SApple OSS Distributions
2839*19c3b8c2SApple OSS Distributions return VM_KERNEL_ADDRHIDE(req);
2840*19c3b8c2SApple OSS Distributions }
2841*19c3b8c2SApple OSS Distributions #endif
2842*19c3b8c2SApple OSS Distributions
/**
 * Entry point for libdispatch to ask for threads
 *
 * Validates the request, then fulfills up to (reqcount - 1) requests
 * immediately from the idle thread pool, and finally enqueues whatever
 * remains as a single thread request that is serviced through the regular
 * creator codepath.
 *
 * @param p           the requesting process (must have a workqueue).
 * @param reqcount    number of threads userspace is asking for; must be in
 *                    (0, UINT16_MAX].
 * @param pp          pthread priority encoding the QoS and overcommit flag.
 * @param cooperative true for the cooperative pool (reqcount must be 1).
 *
 * @return 0 on success, EINVAL for invalid arguments or conflicting flags,
 *         ENOTSUP for parallelism on the cooperative pool. A request made
 *         while the workqueue is exiting returns 0 (it is silently dropped).
 */
static int
workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp, bool cooperative)
{
	thread_qos_t qos = _pthread_priority_thread_qos(pp);
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;
	int ret = 0;

	/* Reject if the process has no workqueue, the count is out of range,
	 * or the priority does not map to a valid QoS. */
	if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
	    qos == THREAD_QOS_UNSPECIFIED) {
		ret = EINVAL;
		goto exit;
	}

	WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
	    wq, reqcount, pp, cooperative);

	workq_threadreq_t req = zalloc(workq_zone_threadreq);
	priority_queue_entry_init(&req->tr_entry);
	req->tr_state = WORKQ_TR_STATE_NEW;
	req->tr_qos = qos;
	workq_tr_flags_t tr_flags = 0;

	if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
		tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
		upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
	}

	if (cooperative) {
		tr_flags |= WORKQ_TR_FLAG_COOPERATIVE;
		upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;

		/* Parallelism (reqcount > 1) is not supported on the
		 * cooperative pool. */
		if (reqcount > 1) {
			ret = ENOTSUP;
			goto free_and_exit;
		}
	}

	/* A thread request cannot be both overcommit and cooperative */
	if (workq_tr_is_cooperative(tr_flags) &&
	    workq_tr_is_overcommit(tr_flags)) {
		ret = EINVAL;
		goto free_and_exit;
	}
	req->tr_flags = tr_flags;

	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
	    wq, workq_trace_req_id(req), req->tr_qos, reqcount);

	workq_lock_spin(wq);
	do {
		if (_wq_exiting(wq)) {
			goto unlock_and_exit;
		}

		/*
		 * When userspace is asking for parallelism, wakeup up to (reqcount - 1)
		 * threads without pacing, to inform the scheduler of that workload.
		 *
		 * The last requests, or the ones that failed the admission checks are
		 * enqueued and go through the regular creator codepath.
		 *
		 * If there aren't enough threads, add one, but re-evaluate everything
		 * as conditions may now have changed.
		 */
		unpaced = reqcount - 1;

		if (reqcount > 1) {
			/* We don't handle asking for parallelism on the cooperative
			 * workqueue just yet */
			assert(!workq_threadreq_is_cooperative(req));

			/* Constrained (non-overcommit) requests are clamped to
			 * whatever the admission check currently allows. */
			if (workq_threadreq_is_nonovercommit(req)) {
				unpaced = workq_constrained_allowance(wq, qos, NULL, false);
				if (unpaced >= reqcount - 1) {
					unpaced = reqcount - 1;
				}
			}
		}

		/*
		 * This path does not currently handle custom workloop parameters
		 * when creating threads for parallelism.
		 */
		assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));

		/*
		 * This is a trimmed down version of workq_threadreq_bind_and_unlock()
		 */
		while (unpaced > 0 && wq->wq_thidlecount) {
			struct uthread *uth;
			bool needs_wakeup;
			uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;

			if (workq_tr_is_overcommit(req->tr_flags)) {
				uu_flags |= UT_WORKQ_OVERCOMMIT;
			}

			uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);

			/* Account the thread as active in this QoS bucket
			 * before it actually starts running. */
			_wq_thactive_inc(wq, qos);
			wq->wq_thscheduled_count[_wq_bucket(qos)]++;
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
			wq->wq_fulfilled++;

			/* The early-bound thread picks these up when it runs. */
			uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
			uth->uu_save.uus_workq_park_data.thread_request = req;
			if (needs_wakeup) {
				workq_thread_wakeup(uth);
			}
			unpaced--;
			reqcount--;
		}
	} while (unpaced && wq->wq_nthreads < wq_max_threads &&
	    workq_add_new_idle_thread(p, wq));

	if (_wq_exiting(wq)) {
		goto unlock_and_exit;
	}

	/* Whatever was not fulfilled above remains as one queued request. */
	req->tr_count = (uint16_t)reqcount;
	if (workq_threadreq_enqueue(wq, req)) {
		/* This can drop the workqueue lock, and take it again */
		workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
	}
	workq_unlock(wq);
	return 0;

unlock_and_exit:
	workq_unlock(wq);
free_and_exit:
	zfree(workq_zone_threadreq, req);
exit:
	return ret;
}
2981*19c3b8c2SApple OSS Distributions
/*
 * Post a kevent/workloop thread request to the process workqueue.
 *
 * Called by the kqueue subsystem with an idle request. If the caller passed
 * WORKQ_THREADREQ_ATTEMPT_REBIND, the current thread may be re-bound to the
 * request directly (the unbind-then-more-events case); otherwise the request
 * is enqueued and the creator may be scheduled.
 *
 * Returns true when the request was accepted; false when the workqueue is
 * exiting, in which case the request is reset to WORKQ_TR_STATE_IDLE.
 */
bool
workq_kern_threadreq_initiate(struct proc *p, workq_threadreq_t req,
    struct turnstile *workloop_ts, thread_qos_t qos,
    workq_kern_threadreq_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	struct uthread *uth = NULL;

	assert(req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT));

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		/* Outside-of-QoS requests derive their QoS bucket from the
		 * workloop's scheduling parameter instead of the caller's qos. */
		workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
		qos = thread_workq_qos_for_pri(trp.trp_pri);
		if (qos == THREAD_QOS_UNSPECIFIED) {
			qos = WORKQ_THREAD_QOS_ABOVEUI;
		}
	}

	assert(req->tr_state == WORKQ_TR_STATE_IDLE);
	priority_queue_entry_init(&req->tr_entry);
	req->tr_count = 1;
	req->tr_state = WORKQ_TR_STATE_NEW;
	req->tr_qos = qos;

	WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq,
	    workq_trace_req_id(req), qos, 1);

	if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) {
		/*
		 * we're called back synchronously from the context of
		 * kqueue_threadreq_unbind from within workq_thread_return()
		 * we can try to match up this thread with this request !
		 */
		uth = current_uthread();
		assert(uth->uu_kqr_bound == NULL);
	}

	workq_lock_spin(wq);
	if (_wq_exiting(wq)) {
		/* Hand the request back to the caller untouched. */
		req->tr_state = WORKQ_TR_STATE_IDLE;
		workq_unlock(wq);
		return false;
	}

	if (uth && workq_threadreq_admissible(wq, uth, req)) {
		/* This is the case of the rebind - we were about to park and unbind
		 * when more events came so keep the binding.
		 */
		assert(uth != wq->wq_creator);

		/* Move the active-thread accounting to the request's bucket
		 * before resetting the thread's priority. */
		if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
			_wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ false);
		}
		/*
		 * We're called from workq_kern_threadreq_initiate()
		 * due to an unbind, with the kq req held.
		 */
		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
		    workq_trace_req_id(req), req->tr_flags, 0);
		wq->wq_fulfilled++;

		kqueue_threadreq_bind(p, req, get_machthread(uth), 0);
	} else {
		if (workloop_ts) {
			/* While the request waits for a thread, the workloop's
			 * turnstile pushes on the workqueue's turnstile. */
			workq_perform_turnstile_operation_locked(wq, ^{
				turnstile_update_inheritor(workloop_ts, wq->wq_turnstile,
				    TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
				turnstile_update_inheritor_complete(workloop_ts,
				    TURNSTILE_INTERLOCK_HELD);
			});
		}

		bool reevaluate_creator_thread_group = false;
#if CONFIG_PREADOPT_TG
		reevaluate_creator_thread_group = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
#endif
		/* We enqueued the highest priority item or we may need to reevaluate if
		 * the creator needs a thread group pre-adoption */
		if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_thread_group) {
			workq_schedule_creator(p, wq, flags);
		}
	}

	workq_unlock(wq);

	return true;
}
3070*19c3b8c2SApple OSS Distributions
/*
 * Change the QoS (and possibly the overcommit-ness) of an already-posted
 * kevent/workloop thread request.
 *
 * If the request is mid-bind, the bind is completed instead. Otherwise the
 * request is dequeued, mutated, and either re-enqueued in place (when it
 * would not become the new head of its priority queue) or treated as a brand
 * new request so admission is fully re-evaluated.
 *
 * No-op for outside-of-QoS requests, when the workqueue is exiting, or when
 * nothing would actually change.
 */
void
workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
    thread_qos_t qos, workq_kern_threadreq_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	bool make_overcommit = false;

	if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
		/* Requests outside-of-QoS shouldn't accept modify operations */
		return;
	}

	workq_lock_spin(wq);

	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
	assert(req->tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP));

	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
		/* A thread was already picked: finish the bind rather than
		 * modifying a request that is no longer queued. */
		kqueue_threadreq_bind(p, req, req->tr_thread, 0);
		workq_unlock(wq);
		return;
	}

	if (flags & WORKQ_THREADREQ_MAKE_OVERCOMMIT) {
		/* TODO (rokhinip): We come into this code path for kqwl thread
		 * requests. kqwl requests cannot be cooperative.
		 */
		assert(!workq_threadreq_is_cooperative(req));

		make_overcommit = workq_threadreq_is_nonovercommit(req);
	}

	/* Nothing to do if the workqueue is going away or the modify would
	 * change neither the QoS nor the overcommit-ness. */
	if (_wq_exiting(wq) || (req->tr_qos == qos && !make_overcommit)) {
		workq_unlock(wq);
		return;
	}

	assert(req->tr_count == 1);
	if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
		panic("Invalid thread request (%p) state %d", req, req->tr_state);
	}

	WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
	    workq_trace_req_id(req), qos, 0);

	struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
	workq_threadreq_t req_max;

	/*
	 * Stage 1: Dequeue the request from its priority queue.
	 *
	 * If we dequeue the root item of the constrained priority queue,
	 * maintain the best constrained request qos invariant.
	 */
	if (priority_queue_remove(pq, &req->tr_entry)) {
		if (workq_threadreq_is_nonovercommit(req)) {
			_wq_thactive_refresh_best_constrained_req_qos(wq);
		}
	}

	/*
	 * Stage 2: Apply changes to the thread request
	 *
	 * If the item will not become the root of the priority queue it belongs to,
	 * then we need to wait in line, just enqueue and return quickly.
	 */
	if (__improbable(make_overcommit)) {
		/* Flip constrained -> overcommit and switch target queues. */
		req->tr_flags ^= WORKQ_TR_FLAG_OVERCOMMIT;
		pq = workq_priority_queue_for_req(wq, req);
	}
	req->tr_qos = qos;

	req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
	if (req_max && req_max->tr_qos >= qos) {
		priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
		    workq_priority_for_req(req), false);
		priority_queue_insert(pq, &req->tr_entry);
		workq_unlock(wq);
		return;
	}

	/*
	 * Stage 3: Reevaluate whether we should run the thread request.
	 *
	 * Pretend the thread request is new again:
	 * - adjust wq_reqcount to not count it anymore.
	 * - make its state WORKQ_TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
	 *   properly attempts a synchronous bind)
	 */
	wq->wq_reqcount--;
	req->tr_state = WORKQ_TR_STATE_NEW;

	/* We enqueued the highest priority item or we may need to reevaluate if
	 * the creator needs a thread group pre-adoption if the request got a new TG */
	bool reevaluate_creator_tg = false;

#if CONFIG_PREADOPT_TG
	reevaluate_creator_tg = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
#endif

	if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_tg) {
		workq_schedule_creator(p, wq, flags);
	}
	workq_unlock(wq);
}
3176*19c3b8c2SApple OSS Distributions
/*
 * Take the process workqueue spinlock on behalf of the kqueue subsystem.
 */
void
workq_kern_threadreq_lock(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);
}
3182*19c3b8c2SApple OSS Distributions
/*
 * Release the process workqueue lock taken by workq_kern_threadreq_lock().
 */
void
workq_kern_threadreq_unlock(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_unlock(wq);
}
3188*19c3b8c2SApple OSS Distributions
/*
 * Update the inheritor of a workloop's turnstile for a queued thread request.
 *
 * Called with the workqueue lock held (see workq_kern_threadreq_lock).
 * If the request is mid-bind, the bind is completed instead, skipping the
 * inheritor update. Otherwise the inheritor becomes the workloop owner
 * thread when one exists, the workqueue's own turnstile when not, or NULL
 * when the workqueue is exiting.
 */
void
workq_kern_threadreq_update_inheritor(struct proc *p, workq_threadreq_t req,
    thread_t owner, struct turnstile *wl_ts,
    turnstile_update_flags_t flags)
{
	struct workqueue *wq = proc_get_wqptr_fast(p);
	turnstile_inheritor_t inheritor;

	assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
	assert(req->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
	workq_lock_held(wq);

	if (req->tr_state == WORKQ_TR_STATE_BINDING) {
		/* A thread was already chosen for this request: complete the
		 * bind and leave the inheritor alone. */
		kqueue_threadreq_bind(p, req, req->tr_thread,
		    KQUEUE_THREADERQ_BIND_NO_INHERITOR_UPDATE);
		return;
	}

	if (_wq_exiting(wq)) {
		inheritor = TURNSTILE_INHERITOR_NULL;
	} else {
		if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
			panic("Invalid thread request (%p) state %d", req, req->tr_state);
		}

		if (owner) {
			inheritor = owner;
			flags |= TURNSTILE_INHERITOR_THREAD;
		} else {
			inheritor = wq->wq_turnstile;
			flags |= TURNSTILE_INHERITOR_TURNSTILE;
		}
	}

	workq_perform_turnstile_operation_locked(wq, ^{
		turnstile_update_inheritor(wl_ts, inheritor, flags);
	});
}
3227*19c3b8c2SApple OSS Distributions
3228*19c3b8c2SApple OSS Distributions void
workq_kern_threadreq_redrive(struct proc * p,workq_kern_threadreq_flags_t flags)3229*19c3b8c2SApple OSS Distributions workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags)
3230*19c3b8c2SApple OSS Distributions {
3231*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3232*19c3b8c2SApple OSS Distributions
3233*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
3234*19c3b8c2SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3235*19c3b8c2SApple OSS Distributions workq_unlock(wq);
3236*19c3b8c2SApple OSS Distributions }
3237*19c3b8c2SApple OSS Distributions
3238*19c3b8c2SApple OSS Distributions /*
3239*19c3b8c2SApple OSS Distributions * Always called at AST by the thread on itself
3240*19c3b8c2SApple OSS Distributions *
3241*19c3b8c2SApple OSS Distributions * Upon quantum expiry, the workqueue subsystem evaluates its state and decides
3242*19c3b8c2SApple OSS Distributions * on what the thread should do next. The TSD value is always set by the thread
3243*19c3b8c2SApple OSS Distributions * on itself in the kernel and cleared either by userspace when it acks the TSD
3244*19c3b8c2SApple OSS Distributions * value and takes action, or by the thread in the kernel when the quantum
3245*19c3b8c2SApple OSS Distributions * expires again.
3246*19c3b8c2SApple OSS Distributions */
3247*19c3b8c2SApple OSS Distributions void
workq_kern_quantum_expiry_reevaluate(proc_t proc,thread_t thread)3248*19c3b8c2SApple OSS Distributions workq_kern_quantum_expiry_reevaluate(proc_t proc, thread_t thread)
3249*19c3b8c2SApple OSS Distributions {
3250*19c3b8c2SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
3251*19c3b8c2SApple OSS Distributions
3252*19c3b8c2SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3253*19c3b8c2SApple OSS Distributions return;
3254*19c3b8c2SApple OSS Distributions }
3255*19c3b8c2SApple OSS Distributions
3256*19c3b8c2SApple OSS Distributions if (!thread_supports_cooperative_workqueue(thread)) {
3257*19c3b8c2SApple OSS Distributions panic("Quantum expired for thread that doesn't support cooperative workqueue");
3258*19c3b8c2SApple OSS Distributions }
3259*19c3b8c2SApple OSS Distributions
3260*19c3b8c2SApple OSS Distributions thread_qos_t qos = uth->uu_workq_pri.qos_bucket;
3261*19c3b8c2SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3262*19c3b8c2SApple OSS Distributions panic("Thread should not have workq bucket of QoS UN");
3263*19c3b8c2SApple OSS Distributions }
3264*19c3b8c2SApple OSS Distributions
3265*19c3b8c2SApple OSS Distributions assert(thread_has_expired_workqueue_quantum(thread, false));
3266*19c3b8c2SApple OSS Distributions
3267*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(proc);
3268*19c3b8c2SApple OSS Distributions assert(wq != NULL);
3269*19c3b8c2SApple OSS Distributions
3270*19c3b8c2SApple OSS Distributions /*
3271*19c3b8c2SApple OSS Distributions * For starters, we're just going to evaluate and see if we need to narrow
3272*19c3b8c2SApple OSS Distributions * the pool and tell this thread to park if needed. In the future, we'll
3273*19c3b8c2SApple OSS Distributions * evaluate and convey other workqueue state information like needing to
3274*19c3b8c2SApple OSS Distributions * pump kevents, etc.
3275*19c3b8c2SApple OSS Distributions */
3276*19c3b8c2SApple OSS Distributions uint64_t flags = 0;
3277*19c3b8c2SApple OSS Distributions
3278*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
3279*19c3b8c2SApple OSS Distributions
3280*19c3b8c2SApple OSS Distributions if (workq_thread_is_cooperative(uth)) {
3281*19c3b8c2SApple OSS Distributions if (!workq_cooperative_allowance(wq, qos, uth, false)) {
3282*19c3b8c2SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3283*19c3b8c2SApple OSS Distributions } else {
3284*19c3b8c2SApple OSS Distributions /* In the future, when we have kevent hookups for the cooperative
3285*19c3b8c2SApple OSS Distributions * pool, we need fancier logic for what userspace should do. But
3286*19c3b8c2SApple OSS Distributions * right now, only userspace thread requests exist - so we'll just
3287*19c3b8c2SApple OSS Distributions * tell userspace to shuffle work items */
3288*19c3b8c2SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE;
3289*19c3b8c2SApple OSS Distributions }
3290*19c3b8c2SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth)) {
3291*19c3b8c2SApple OSS Distributions if (!workq_constrained_allowance(wq, qos, uth, false)) {
3292*19c3b8c2SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3293*19c3b8c2SApple OSS Distributions }
3294*19c3b8c2SApple OSS Distributions }
3295*19c3b8c2SApple OSS Distributions workq_unlock(wq);
3296*19c3b8c2SApple OSS Distributions
3297*19c3b8c2SApple OSS Distributions WQ_TRACE(TRACE_wq_quantum_expiry_reevaluate, flags, 0, 0, 0);
3298*19c3b8c2SApple OSS Distributions
3299*19c3b8c2SApple OSS Distributions kevent_set_workq_quantum_expiry_user_tsd(proc, thread, flags);
3300*19c3b8c2SApple OSS Distributions
3301*19c3b8c2SApple OSS Distributions /* We have conveyed to userspace about what it needs to do upon quantum
3302*19c3b8c2SApple OSS Distributions * expiry, now rearm the workqueue quantum again */
3303*19c3b8c2SApple OSS Distributions thread_arm_workqueue_quantum(get_machthread(uth));
3304*19c3b8c2SApple OSS Distributions }
3305*19c3b8c2SApple OSS Distributions
3306*19c3b8c2SApple OSS Distributions void
workq_schedule_creator_turnstile_redrive(struct workqueue * wq,bool locked)3307*19c3b8c2SApple OSS Distributions workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked)
3308*19c3b8c2SApple OSS Distributions {
3309*19c3b8c2SApple OSS Distributions if (locked) {
3310*19c3b8c2SApple OSS Distributions workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_NONE);
3311*19c3b8c2SApple OSS Distributions } else {
3312*19c3b8c2SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
3313*19c3b8c2SApple OSS Distributions }
3314*19c3b8c2SApple OSS Distributions }
3315*19c3b8c2SApple OSS Distributions
/*
 * Handle the WQOPS_THREAD_RETURN / WQOPS_THREAD_KEVENT_RETURN /
 * WQOPS_THREAD_WORKLOOP_RETURN operations: a workqueue thread has finished
 * its work item and is returning to the kernel.
 *
 * If the thread is bound to a kqueue/workloop thread request, pending events
 * are first delivered onto its userspace stack; on success the thread then
 * proceeds to pick up a new thread request or park, in which case this
 * function does NOT return (see __builtin_unreachable at the bottom).
 *
 * Returns EINVAL for non-workqueue or dying threads (or an event list with
 * no bound request), or the error from delivering stack events.
 */
static int
workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap,
    struct workqueue *wq)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	workq_threadreq_t kqr = uth->uu_kqr_bound;
	workq_threadreq_param_t trp = { };
	int nevents = uap->affinity, error;
	user_addr_t eventlist = uap->item;

	/* Only live (non-dying) workqueue-tagged threads may use this path. */
	if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
	    (uth->uu_workq_flags & UT_WORKQ_DYING)) {
		return EINVAL;
	}

	/* An event list requires a bound kqueue thread request to deliver to. */
	if (eventlist && nevents && kqr == NULL) {
		return EINVAL;
	}

	/* reset signal mask on the workqueue thread to default state */
	if (uth->uu_sigmask != (sigset_t)(~workq_threadmask)) {
		proc_lock(p);
		uth->uu_sigmask = ~workq_threadmask;
		proc_unlock(p);
	}

	if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
		/*
		 * Ensure we store the threadreq param before unbinding
		 * the kqr from this thread.
		 */
		trp = kqueue_threadreq_workloop_param(kqr);
	}

	/*
	 * Freeze the base pri while we decide the fate of this thread.
	 *
	 * Either:
	 * - we return to user and kevent_cleanup will have unfrozen the base pri,
	 * - or we proceed to workq_select_threadreq_or_park_and_unlock() who will.
	 */
	thread_freeze_base_pri(th);

	if (kqr) {
		/* Build the upcall flags userspace uses to interpret this wakeup. */
		uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI | WQ_FLAG_THREAD_REUSE;
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
		} else {
			upcall_flags |= WQ_FLAG_THREAD_KEVENT;
		}
		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
		} else {
			if (workq_thread_is_overcommit(uth)) {
				upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
			}
			if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
				upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
			} else {
				upcall_flags |= uth->uu_workq_pri.qos_req |
				    WQ_FLAG_THREAD_PRIO_QOS;
			}
		}
		/* Deliver pending events onto the thread's userspace stack. */
		error = pthread_functions->workq_handle_stack_events(p, th,
		    get_task_map(proc_task(p)), uth->uu_workq_stackaddr,
		    uth->uu_workq_thport, eventlist, nevents, upcall_flags);
		if (error) {
			assert(uth->uu_kqr_bound == kqr);
			return error;
		}

		// pthread is supposed to pass KEVENT_FLAG_PARKING here
		// which should cause the above call to either:
		// - not return
		// - return an error
		// - return 0 and have unbound properly
		assert(uth->uu_kqr_bound == NULL);
	}

	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, uap->options, 0, 0);

	thread_sched_call(th, NULL);
	thread_will_park_or_terminate(th);
#if CONFIG_WORKLOOP_DEBUG
	UU_KEVENT_HISTORY_WRITE_ENTRY(uth, { .uu_error = -1, });
#endif

	/* Pick up the next thread request or park; this never returns. */
	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
	uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
	workq_select_threadreq_or_park_and_unlock(p, wq, uth,
	    WQ_SETUP_CLEAR_VOUCHER);
	__builtin_unreachable();
}
3411*19c3b8c2SApple OSS Distributions
/**
 * Multiplexed call to interact with the workqueue mechanism
 *
 * uap->options selects the operation (WQOPS_*); uap->affinity and uap->prio
 * carry per-operation integer arguments, and uap->item a per-operation
 * pointer argument (see the comments on each case below).
 *
 * Returns 0 on success or an errno.  The calling process must have
 * registered with the workq subsystem (P_LREGISTER) first.  Note that the
 * WQOPS_THREAD_*_RETURN cases may not return to the caller at all (the
 * thread parks or picks up new work inside workq_thread_return()).
 */
int
workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
{
	int options = uap->options;
	int arg2 = uap->affinity;
	int arg3 = uap->prio;
	struct workqueue *wq = proc_get_wqptr(p);
	int error = 0;

	if ((p->p_lflag & P_LREGISTER) == 0) {
		return EINVAL;
	}

	switch (options) {
	case WQOPS_QUEUE_NEWSPISUPP: {
		/*
		 * arg2 = offset of serialno into dispatch queue
		 * arg3 = kevent support
		 */
		int offset = arg2;
		if (arg3 & 0x01) {
			// If we get here, then userspace has indicated support for kevent delivery.
		}

		p->p_dispatchqueue_serialno_offset = (uint64_t)offset;
		break;
	}
	case WQOPS_QUEUE_REQTHREADS: {
		/*
		 * arg2 = number of threads to start
		 * arg3 = priority
		 */
		error = workq_reqthreads(p, arg2, arg3, false);
		break;
	}
	/* For requesting threads for the cooperative pool */
	case WQOPS_QUEUE_REQTHREADS2: {
		/*
		 * arg2 = number of threads to start
		 * arg3 = priority
		 */
		error = workq_reqthreads(p, arg2, arg3, true);
		break;
	}
	case WQOPS_SET_EVENT_MANAGER_PRIORITY: {
		/*
		 * arg2 = priority for the manager thread
		 *
		 * if _PTHREAD_PRIORITY_SCHED_PRI_FLAG is set,
		 * the low bits of the value contains a scheduling priority
		 * instead of a QOS value
		 */
		pthread_priority_t pri = arg2;

		if (wq == NULL) {
			error = EINVAL;
			break;
		}

		/*
		 * Normalize the incoming priority so that it is ordered numerically.
		 */
		if (_pthread_priority_has_sched_pri(pri)) {
			pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK |
			    _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
		} else {
			/* reject out-of-range relative priorities or a missing QoS */
			thread_qos_t qos = _pthread_priority_thread_qos(pri);
			int relpri = _pthread_priority_relpri(pri);
			if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE ||
			    qos == THREAD_QOS_UNSPECIFIED) {
				error = EINVAL;
				break;
			}
			pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
		}

		/*
		 * If userspace passes a scheduling priority, that wins over any QoS.
		 * Userspace should takes care not to lower the priority this way.
		 */
		workq_lock_spin(wq);
		if (wq->wq_event_manager_priority < (uint32_t)pri) {
			wq->wq_event_manager_priority = (uint32_t)pri;
		}
		workq_unlock(wq);
		break;
	}
	case WQOPS_THREAD_KEVENT_RETURN:
	case WQOPS_THREAD_WORKLOOP_RETURN:
	case WQOPS_THREAD_RETURN: {
		/* May not return on the success path (thread parks or re-runs). */
		error = workq_thread_return(p, uap, wq);
		break;
	}

	case WQOPS_SHOULD_NARROW: {
		/*
		 * arg2 = priority to test
		 * arg3 = unused
		 *
		 * *retval reports whether the constrained pool has no remaining
		 * allowance at that priority (i.e. the thread should narrow).
		 */
		thread_t th = current_thread();
		struct uthread *uth = get_bsdthread_info(th);
		if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
		    (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) {
			error = EINVAL;
			break;
		}

		thread_qos_t qos = _pthread_priority_thread_qos(arg2);
		if (qos == THREAD_QOS_UNSPECIFIED) {
			error = EINVAL;
			break;
		}
		workq_lock_spin(wq);
		bool should_narrow = !workq_constrained_allowance(wq, qos, uth, false);
		workq_unlock(wq);

		*retval = should_narrow;
		break;
	}
	case WQOPS_SETUP_DISPATCH: {
		/*
		 * item = pointer to workq_dispatch_config structure
		 * arg2 = sizeof(item)
		 */
		struct workq_dispatch_config cfg;
		bzero(&cfg, sizeof(cfg));

		/* Copy in at most sizeof(cfg); shorter inputs leave the tail zeroed. */
		error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
		if (error) {
			break;
		}

		if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
		    cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
			error = ENOTSUP;
			break;
		}

		/* Load fields from version 1 */
		p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;

		/* Load fields from version 2 */
		if (cfg.wdc_version >= 2) {
			p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
		}

		break;
	}
	default:
		error = EINVAL;
		break;
	}

	return error;
}
3570*19c3b8c2SApple OSS Distributions
3571*19c3b8c2SApple OSS Distributions /*
3572*19c3b8c2SApple OSS Distributions * We have no work to do, park ourselves on the idle list.
3573*19c3b8c2SApple OSS Distributions *
3574*19c3b8c2SApple OSS Distributions * Consumes the workqueue lock and does not return.
3575*19c3b8c2SApple OSS Distributions */
__attribute__((noreturn, noinline))
static void
workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth,
    uint32_t setup_flags)
{
	assert(uth == current_uthread());
	assert(uth->uu_kqr_bound == NULL);
	workq_push_idle_thread(p, wq, uth, setup_flags); // may not return

	workq_thread_reset_cpupercent(NULL, uth);

#if CONFIG_PREADOPT_TG
	/* Clear the preadoption thread group on the thread.
	 *
	 * Case 1:
	 * Creator thread which never picked up a thread request. We set a
	 * preadoption thread group on creator threads but if it never picked
	 * up a thread request and didn't go to userspace, then the thread will
	 * park with a preadoption thread group but no explicitly adopted
	 * voucher or work interval.
	 *
	 * We drop the preadoption thread group here before proceeding to park.
	 * Note - we may get preempted when we drop the workq lock below.
	 *
	 * Case 2:
	 * Thread picked up a thread request and bound to it and returned back
	 * from userspace and is parking. At this point, preadoption thread
	 * group should be NULL since the thread has unbound from the thread
	 * request. So this operation should be a no-op.
	 */
	thread_set_preadopt_thread_group(get_machthread(uth), NULL);
#endif

	/* Idle cleanup (stack free, voucher drop) is done with the lock dropped. */
	if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) &&
	    !(uth->uu_workq_flags & UT_WORKQ_DYING)) {
		workq_unlock(wq);

		/*
		 * workq_push_idle_thread() will unset `has_stack`
		 * if it wants us to free the stack before parking.
		 */
		if (!uth->uu_save.uus_workq_park_data.has_stack) {
			pthread_functions->workq_markfree_threadstack(p,
			    get_machthread(uth), get_task_map(proc_task(p)),
			    uth->uu_workq_stackaddr);
		}

		/*
		 * When we remove the voucher from the thread, we may lose our importance
		 * causing us to get preempted, so we do this after putting the thread on
		 * the idle list. Then, when we get our importance back we'll be able to
		 * use this thread from e.g. the kevent call out to deliver a boosting
		 * message.
		 *
		 * Note that setting the voucher to NULL will not clear the preadoption
		 * thread since this thread could have become the creator again and
		 * perhaps acquired a preadoption thread group.
		 */
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);

		workq_lock_spin(wq);
		uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
		setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
	}

	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);

	if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
		/*
		 * While we'd dropped the lock to unset our voucher, someone came
		 * around and made us runnable. But because we weren't waiting on the
		 * event their thread_wakeup() was ineffectual. To correct for that,
		 * we just run the continuation ourselves.
		 */
		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
		__builtin_unreachable();
	}

	if (uth->uu_workq_flags & UT_WORKQ_DYING) {
		workq_unpark_for_death_and_unlock(p, wq, uth,
		    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, setup_flags);
		__builtin_unreachable();
	}

	/* Disarm the workqueue quantum since the thread is now idle */
	thread_disarm_workqueue_quantum(get_machthread(uth));

	/* Park: block interruptibly; wakeups resume in workq_unpark_continue. */
	thread_set_pending_block_hint(get_machthread(uth), kThreadWaitParkedWorkQueue);
	assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
	workq_unlock(wq);
	thread_block(workq_unpark_continue);
	__builtin_unreachable();
}
3671*19c3b8c2SApple OSS Distributions
3672*19c3b8c2SApple OSS Distributions static inline bool
workq_may_start_event_mgr_thread(struct workqueue * wq,struct uthread * uth)3673*19c3b8c2SApple OSS Distributions workq_may_start_event_mgr_thread(struct workqueue *wq, struct uthread *uth)
3674*19c3b8c2SApple OSS Distributions {
3675*19c3b8c2SApple OSS Distributions /*
3676*19c3b8c2SApple OSS Distributions * There's an event manager request and either:
3677*19c3b8c2SApple OSS Distributions * - no event manager currently running
3678*19c3b8c2SApple OSS Distributions * - we are re-using the event manager
3679*19c3b8c2SApple OSS Distributions */
3680*19c3b8c2SApple OSS Distributions return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 ||
3681*19c3b8c2SApple OSS Distributions (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
3682*19c3b8c2SApple OSS Distributions }
3683*19c3b8c2SApple OSS Distributions
3684*19c3b8c2SApple OSS Distributions static uint32_t
workq_constrained_allowance(struct workqueue * wq,thread_qos_t at_qos,struct uthread * uth,bool may_start_timer)3685*19c3b8c2SApple OSS Distributions workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
3686*19c3b8c2SApple OSS Distributions struct uthread *uth, bool may_start_timer)
3687*19c3b8c2SApple OSS Distributions {
3688*19c3b8c2SApple OSS Distributions assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
3689*19c3b8c2SApple OSS Distributions uint32_t count = 0;
3690*19c3b8c2SApple OSS Distributions
3691*19c3b8c2SApple OSS Distributions uint32_t max_count = wq->wq_constrained_threads_scheduled;
3692*19c3b8c2SApple OSS Distributions if (uth && workq_thread_is_nonovercommit(uth)) {
3693*19c3b8c2SApple OSS Distributions /*
3694*19c3b8c2SApple OSS Distributions * don't count the current thread as scheduled
3695*19c3b8c2SApple OSS Distributions */
3696*19c3b8c2SApple OSS Distributions assert(max_count > 0);
3697*19c3b8c2SApple OSS Distributions max_count--;
3698*19c3b8c2SApple OSS Distributions }
3699*19c3b8c2SApple OSS Distributions if (max_count >= wq_max_constrained_threads) {
3700*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
3701*19c3b8c2SApple OSS Distributions wq->wq_constrained_threads_scheduled,
3702*19c3b8c2SApple OSS Distributions wq_max_constrained_threads);
3703*19c3b8c2SApple OSS Distributions /*
3704*19c3b8c2SApple OSS Distributions * we need 1 or more constrained threads to return to the kernel before
3705*19c3b8c2SApple OSS Distributions * we can dispatch additional work
3706*19c3b8c2SApple OSS Distributions */
3707*19c3b8c2SApple OSS Distributions return 0;
3708*19c3b8c2SApple OSS Distributions }
3709*19c3b8c2SApple OSS Distributions max_count -= wq_max_constrained_threads;
3710*19c3b8c2SApple OSS Distributions
3711*19c3b8c2SApple OSS Distributions /*
3712*19c3b8c2SApple OSS Distributions * Compute a metric for many how many threads are active. We find the
3713*19c3b8c2SApple OSS Distributions * highest priority request outstanding and then add up the number of active
3714*19c3b8c2SApple OSS Distributions * threads in that and all higher-priority buckets. We'll also add any
3715*19c3b8c2SApple OSS Distributions * "busy" threads which are not currently active but blocked recently enough
3716*19c3b8c2SApple OSS Distributions * that we can't be sure that they won't be unblocked soon and start
3717*19c3b8c2SApple OSS Distributions * being active again.
3718*19c3b8c2SApple OSS Distributions *
3719*19c3b8c2SApple OSS Distributions * We'll then compare this metric to our max concurrency to decide whether
3720*19c3b8c2SApple OSS Distributions * to add a new thread.
3721*19c3b8c2SApple OSS Distributions */
3722*19c3b8c2SApple OSS Distributions
3723*19c3b8c2SApple OSS Distributions uint32_t busycount, thactive_count;
3724*19c3b8c2SApple OSS Distributions
3725*19c3b8c2SApple OSS Distributions thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
3726*19c3b8c2SApple OSS Distributions at_qos, &busycount, NULL);
3727*19c3b8c2SApple OSS Distributions
3728*19c3b8c2SApple OSS Distributions if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
3729*19c3b8c2SApple OSS Distributions at_qos <= uth->uu_workq_pri.qos_bucket) {
3730*19c3b8c2SApple OSS Distributions /*
3731*19c3b8c2SApple OSS Distributions * Don't count this thread as currently active, but only if it's not
3732*19c3b8c2SApple OSS Distributions * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
3733*19c3b8c2SApple OSS Distributions * managers.
3734*19c3b8c2SApple OSS Distributions */
3735*19c3b8c2SApple OSS Distributions assert(thactive_count > 0);
3736*19c3b8c2SApple OSS Distributions thactive_count--;
3737*19c3b8c2SApple OSS Distributions }
3738*19c3b8c2SApple OSS Distributions
3739*19c3b8c2SApple OSS Distributions count = wq_max_parallelism[_wq_bucket(at_qos)];
3740*19c3b8c2SApple OSS Distributions if (count > thactive_count + busycount) {
3741*19c3b8c2SApple OSS Distributions count -= thactive_count + busycount;
3742*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
3743*19c3b8c2SApple OSS Distributions thactive_count, busycount);
3744*19c3b8c2SApple OSS Distributions return MIN(count, max_count);
3745*19c3b8c2SApple OSS Distributions } else {
3746*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
3747*19c3b8c2SApple OSS Distributions thactive_count, busycount);
3748*19c3b8c2SApple OSS Distributions }
3749*19c3b8c2SApple OSS Distributions
3750*19c3b8c2SApple OSS Distributions if (may_start_timer) {
3751*19c3b8c2SApple OSS Distributions /*
3752*19c3b8c2SApple OSS Distributions * If this is called from the add timer, we won't have another timer
3753*19c3b8c2SApple OSS Distributions * fire when the thread exits the "busy" state, so rearm the timer.
3754*19c3b8c2SApple OSS Distributions */
3755*19c3b8c2SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
3756*19c3b8c2SApple OSS Distributions }
3757*19c3b8c2SApple OSS Distributions
3758*19c3b8c2SApple OSS Distributions return 0;
3759*19c3b8c2SApple OSS Distributions }
3760*19c3b8c2SApple OSS Distributions
3761*19c3b8c2SApple OSS Distributions static bool
workq_threadreq_admissible(struct workqueue * wq,struct uthread * uth,workq_threadreq_t req)3762*19c3b8c2SApple OSS Distributions workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
3763*19c3b8c2SApple OSS Distributions workq_threadreq_t req)
3764*19c3b8c2SApple OSS Distributions {
3765*19c3b8c2SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
3766*19c3b8c2SApple OSS Distributions return workq_may_start_event_mgr_thread(wq, uth);
3767*19c3b8c2SApple OSS Distributions }
3768*19c3b8c2SApple OSS Distributions if (workq_threadreq_is_cooperative(req)) {
3769*19c3b8c2SApple OSS Distributions return workq_cooperative_allowance(wq, req->tr_qos, uth, true);
3770*19c3b8c2SApple OSS Distributions }
3771*19c3b8c2SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
3772*19c3b8c2SApple OSS Distributions return workq_constrained_allowance(wq, req->tr_qos, uth, true);
3773*19c3b8c2SApple OSS Distributions }
3774*19c3b8c2SApple OSS Distributions
3775*19c3b8c2SApple OSS Distributions return true;
3776*19c3b8c2SApple OSS Distributions }
3777*19c3b8c2SApple OSS Distributions
3778*19c3b8c2SApple OSS Distributions /*
3779*19c3b8c2SApple OSS Distributions * Called from the context of selecting thread requests for threads returning
3780*19c3b8c2SApple OSS Distributions * from userspace or creator thread
3781*19c3b8c2SApple OSS Distributions */
/*
 * Return the best pending thread request in the cooperative pool, or NULL
 * if none is eligible.  Must be called with the workq lock held.
 *
 * If `uth` is itself a cooperative thread, it is transiently removed from
 * the cooperative scheduled counts while the best-request QoS is refreshed,
 * since it is the thread looking for new work.
 */
static workq_threadreq_t
workq_cooperative_queue_best_req(struct workqueue *wq, struct uthread *uth)
{
	workq_lock_held(wq);

	/*
	 * If the current thread is cooperative, we need to exclude it as part of
	 * cooperative schedule count since this thread is looking for a new
	 * request. Change in the schedule count for cooperative pool therefore
	 * requires us to reeevaluate the next best request for it.
	 */
	if (uth && workq_thread_is_cooperative(uth)) {
		_wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);

		(void) _wq_cooperative_queue_refresh_best_req_qos(wq);

		/* restore the count — the thread is still scheduled right now */
		_wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
	} else {
		/*
		 * The old value that was already precomputed should be safe to use -
		 * add an assert that asserts that the best req QoS doesn't change in
		 * this case
		 */
		assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
	}

	thread_qos_t qos = wq->wq_cooperative_queue_best_req_qos;

	/* There are no eligible requests in the cooperative pool */
	if (qos == THREAD_QOS_UNSPECIFIED) {
		return NULL;
	}
	assert(qos != WORKQ_THREAD_QOS_ABOVEUI);
	assert(qos != WORKQ_THREAD_QOS_MANAGER);

	/* The per-QoS queue for the best QoS must have a request queued. */
	uint8_t bucket = _wq_bucket(qos);
	assert(!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket]));

	return STAILQ_FIRST(&wq->wq_cooperative_queue[bucket]);
}
3822*19c3b8c2SApple OSS Distributions
/*
 * Pick the best thread request for the creator thread to act on, arbitrating
 * between the special (sched-pri) queue, the event manager, the overcommit,
 * cooperative, and constrained QoS pools, and any turnstile push.
 *
 * wq: the workqueue; the lock must be held (asserted transitively via
 *     workq_cooperative_queue_best_req).
 *
 * Returns NULL when no request passes its admission check and there is no
 * turnstile push to service.
 */
static workq_threadreq_t
workq_threadreq_select_for_creator(struct workqueue *wq)
{
	workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
	thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
	uint8_t pri = 0;

	/*
	 * Compute the best priority request, and ignore the turnstile for now
	 */

	req_pri = priority_queue_max(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_pri) {
		pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
		    &req_pri->tr_entry);
	}

	/*
	 * Handle the manager thread request. The special queue might yield
	 * a higher priority, but the manager always beats the QoS world.
	 */

	req_mgr = wq->wq_event_manager_threadreq;
	if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;

		/* Normalize the manager priority to a sched pri for comparison. */
		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		} else {
			mgr_pri = thread_workq_pri_for_qos(
				_pthread_priority_thread_qos(mgr_pri));
		}

		/* Manager wins ties against the special queue. */
		return mgr_pri >= pri ? req_mgr : req_pri;
	}

	/*
	 * Compute the best QoS Request, and check whether it beats the "pri" one
	 *
	 * Start by comparing the overcommit and the cooperative pool
	 */
	req_qos = priority_queue_max(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_qos) {
		qos = req_qos->tr_qos;
	}

	req_tmp = workq_cooperative_queue_best_req(wq, NULL);
	if (req_tmp && qos <= req_tmp->tr_qos) {
		/*
		 * Cooperative TR is better between overcommit and cooperative. Note
		 * that if qos is same between overcommit and cooperative, we choose
		 * cooperative.
		 *
		 * Pick cooperative pool if it passes the admissions check
		 */
		if (workq_cooperative_allowance(wq, req_tmp->tr_qos, NULL, true)) {
			req_qos = req_tmp;
			qos = req_qos->tr_qos;
		}
	}

	/*
	 * Compare the best QoS so far - either from overcommit or from cooperative
	 * pool - and compare it with the constrained pool
	 */
	req_tmp = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);

	if (req_tmp && qos < req_tmp->tr_qos) {
		/*
		 * Constrained pool is best in QoS between overcommit, cooperative
		 * and constrained. Now check how it fares against the priority case
		 */
		if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
			return req_pri;
		}

		if (workq_constrained_allowance(wq, req_tmp->tr_qos, NULL, true)) {
			/*
			 * If the constrained thread request is the best one and passes
			 * the admission check, pick it.
			 */
			return req_tmp;
		}
	}

	/*
	 * Compare the best of the QoS world with the priority
	 */
	if (pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
		return req_pri;
	}

	if (req_qos) {
		return req_qos;
	}

	/*
	 * If we had no eligible request but we have a turnstile push,
	 * it must be a non overcommit thread request that failed
	 * the admission check.
	 *
	 * Just fake a BG thread request so that if the push stops the creator
	 * priority just drops to 4.
	 */
	if (turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, NULL)) {
		/*
		 * Shared, immutable placeholder request; only tr_qos is meaningful
		 * to the caller.
		 */
		static struct workq_threadreq_s workq_sync_push_fake_req = {
			.tr_qos = THREAD_QOS_BACKGROUND,
		};

		return &workq_sync_push_fake_req;
	}

	return NULL;
}
3940*19c3b8c2SApple OSS Distributions
/*
 * Rebalance the constrained/cooperative scheduled-thread counts when a thread
 * of one flavor (overcommit, cooperative, non-overcommit/constrained) picks up
 * a thread request of a possibly different flavor.
 *
 * wq:             the workqueue; lock must be held.
 * uth:            the thread being rebound to the new request.
 * old_thread_qos: the QoS the thread was counted under (used for the
 *                 cooperative-count decrement).
 * tr_flags:       flags of the thread request being picked up.
 *
 * Returns true if this caused a change in the schedule counts of the
 * cooperative pool
 */
static bool
workq_adjust_cooperative_constrained_schedule_counts(struct workqueue *wq,
    struct uthread *uth, thread_qos_t old_thread_qos, workq_tr_flags_t tr_flags)
{
	workq_lock_held(wq);

	/*
	 * Row: thread type
	 * Column: Request type
	 *
	 *		overcommit  non-overcommit	cooperative
	 * overcommit		X	case 1		case 2
	 * cooperative	    case 3	case 4		case 5
	 * non-overcommit   case 6	X		case 7
	 *
	 * Move the thread to the right bucket depending on what state it currently
	 * has and what state the thread req it picks, is going to have.
	 *
	 * Note that the creator thread is an overcommit thread.
	 */
	thread_qos_t new_thread_qos = uth->uu_workq_pri.qos_req;

	/*
	 * Anytime a cooperative bucket's schedule count changes, we need to
	 * potentially refresh the next best QoS for that pool when we determine
	 * the next request for the creator
	 */
	bool cooperative_pool_sched_count_changed = false;

	if (workq_thread_is_overcommit(uth)) {
		if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 1: thread is overcommit, req is non-overcommit
			wq->wq_constrained_threads_scheduled++;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 2: thread is overcommit, req is cooperative
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
	} else if (workq_thread_is_cooperative(uth)) {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 3: thread is cooperative, req is overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
		} else if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 4: thread is cooperative, req is non-overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			wq->wq_constrained_threads_scheduled++;
		} else {
			// Case 5: thread is cooperative, req is also cooperative
			// (QoS may differ: decrement the old bucket, increment the new)
			assert(workq_tr_is_cooperative(tr_flags));
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
		}
		/* Leaving (or moving within) the cooperative pool always changes it. */
		cooperative_pool_sched_count_changed = true;
	} else {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 6: Thread is non-overcommit, req is overcommit
			wq->wq_constrained_threads_scheduled--;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 7: Thread is non-overcommit, req is cooperative
			wq->wq_constrained_threads_scheduled--;
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
	}

	return cooperative_pool_sched_count_changed;
}
4012*19c3b8c2SApple OSS Distributions
/*
 * Pick the best thread request for a thread returning to the workqueue,
 * arbitrating between the max turnstile push, the special (sched-pri) queue,
 * the event manager, and the overcommit/cooperative/constrained QoS pools.
 *
 * Mirrors workq_threadreq_select_for_creator(), with two differences: the
 * turnstile proprietor is considered as a priority source, and admission
 * checks account for the selecting thread (uth).
 *
 * wq:  the workqueue; lock must be held.
 * uth: the selecting thread; treated as NULL if it is the creator, since the
 *      creator is not bound to any request.
 */
static workq_threadreq_t
workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
{
	workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
	uintptr_t proprietor;
	thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
	uint8_t pri = 0;

	if (uth == wq->wq_creator) {
		uth = NULL;
	}

	/*
	 * Compute the best priority request (special or turnstile)
	 */

	pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
	    &proprietor);
	if (pri) {
		/* The proprietor of a workq turnstile push is a kqworkloop. */
		struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
		req_pri = &kqwl->kqwl_request;
		if (req_pri->tr_state != WORKQ_TR_STATE_QUEUED) {
			panic("Invalid thread request (%p) state %d",
			    req_pri, req_pri->tr_state);
		}
	} else {
		req_pri = NULL;
	}

	req_tmp = priority_queue_max(&wq->wq_special_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
	    &req_tmp->tr_entry)) {
		/* The special queue beats the turnstile push. */
		req_pri = req_tmp;
		pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
		    &req_tmp->tr_entry);
	}

	/*
	 * Handle the manager thread request. The special queue might yield
	 * a higher priority, but the manager always beats the QoS world.
	 */

	req_mgr = wq->wq_event_manager_threadreq;
	if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;

		/* Normalize the manager priority to a sched pri for comparison. */
		if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
		} else {
			mgr_pri = thread_workq_pri_for_qos(
				_pthread_priority_thread_qos(mgr_pri));
		}

		/* Manager wins ties against the priority request. */
		return mgr_pri >= pri ? req_mgr : req_pri;
	}

	/*
	 * Compute the best QoS Request, and check whether it beats the "pri" one
	 */

	req_qos = priority_queue_max(&wq->wq_overcommit_queue,
	    struct workq_threadreq_s, tr_entry);
	if (req_qos) {
		qos = req_qos->tr_qos;
	}

	req_tmp = workq_cooperative_queue_best_req(wq, uth);
	if (req_tmp && qos <= req_tmp->tr_qos) {
		/*
		 * Cooperative TR is better between overcommit and cooperative. Note
		 * that if qos is same between overcommit and cooperative, we choose
		 * cooperative.
		 *
		 * Pick cooperative pool if it passes the admissions check
		 */
		if (workq_cooperative_allowance(wq, req_tmp->tr_qos, uth, true)) {
			req_qos = req_tmp;
			qos = req_qos->tr_qos;
		}
	}

	/*
	 * Compare the best QoS so far - either from overcommit or from cooperative
	 * pool - and compare it with the constrained pool
	 */
	req_tmp = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);

	if (req_tmp && qos < req_tmp->tr_qos) {
		/*
		 * Constrained pool is best in QoS between overcommit, cooperative
		 * and constrained. Now check how it fares against the priority case
		 */
		if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
			return req_pri;
		}

		if (workq_constrained_allowance(wq, req_tmp->tr_qos, uth, true)) {
			/*
			 * If the constrained thread request is the best one and passes
			 * the admission check, pick it.
			 */
			return req_tmp;
		}
	}

	/* Compare the best of the QoS world with the priority request. */
	if (req_pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
		return req_pri;
	}

	return req_qos;
}
4126*19c3b8c2SApple OSS Distributions
/*
 * The creator is an anonymous thread that is counted as scheduled,
 * but otherwise without its scheduler callback set or tracked as active
 * that is used to make other threads.
 *
 * When more requests are added or an existing one is hurried along,
 * a creator is elected and setup, or the existing one overridden accordingly.
 *
 * While this creator is in flight, because no request has been dequeued,
 * already running threads have a chance at stealing thread requests avoiding
 * useless context switches, and the creator once scheduled may not find any
 * work to do and will then just park again.
 *
 * The creator serves the dual purpose of informing the scheduler of work that
 * hasn't be materialized as threads yet, and also as a natural pacing mechanism
 * for thread creation.
 *
 * By being anonymous (and not bound to anything) it means that thread requests
 * can be stolen from this creator by threads already on core yielding more
 * efficient scheduling and reduced context switches.
 *
 * p:     the owning process, may be NULL only when thread creation is not
 *        requested (asserted below).
 * wq:    the workqueue; lock must be held on entry (and may be dropped and
 *        retaken by workq_schedule_immediate_thread_creation /
 *        workq_add_new_idle_thread).
 * flags: WORKQ_THREADREQ_* flags controlling whether/how threads may be
 *        created on this path.
 */
static void
workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags)
{
	workq_threadreq_t req;
	struct uthread *uth;
	bool needs_wakeup;

	workq_lock_held(wq);
	assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);

again:
	uth = wq->wq_creator;

	if (!wq->wq_reqcount) {
		/*
		 * There is no thread request left.
		 *
		 * If there is a creator, leave everything in place, so that it cleans
		 * up itself in workq_push_idle_thread().
		 *
		 * Else, make sure the turnstile state is reset to no inheritor.
		 */
		if (uth == NULL) {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
		return;
	}

	req = workq_threadreq_select_for_creator(wq);
	if (req == NULL) {
		/*
		 * There isn't a thread request that passes the admission check.
		 *
		 * If there is a creator, do not touch anything, the creator will sort
		 * it out when it runs.
		 *
		 * Else, set the inheritor to "WORKQ" so that the turnstile propagation
		 * code calls us if anything changes.
		 */
		if (uth == NULL) {
			workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
		}
		return;
	}


	if (uth) {
		/*
		 * We need to maybe override the creator we already have
		 */
		if (workq_thread_needs_priority_change(req, uth)) {
			WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
			    wq, 1, uthread_tid(uth), req->tr_qos);
			workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		}
		assert(wq->wq_inheritor == get_machthread(uth));
	} else if (wq->wq_thidlecount) {
		/*
		 * We need to unpark a creator thread
		 */
		wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
		    &needs_wakeup);
		/* Always reset the priorities on the newly chosen creator */
		workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
		workq_turnstile_update_inheritor(wq, get_machthread(uth),
		    TURNSTILE_INHERITOR_THREAD);
		WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
		    wq, 2, uthread_tid(uth), req->tr_qos);
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields = 0;
		if (needs_wakeup) {
			workq_thread_wakeup(uth);
		}
	} else {
		/*
		 * We need to allocate a thread...
		 */
		if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
			/* out of threads, just go away */
			flags = WORKQ_THREADREQ_NONE;
		} else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
			act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
		} else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
			/* This can drop the workqueue lock, and take it again */
			workq_schedule_immediate_thread_creation(wq);
		} else if (workq_add_new_idle_thread(p, wq)) {
			/* A new idle thread was created; retry creator election. */
			goto again;
		} else {
			workq_schedule_delayed_thread_creation(wq, 0);
		}

		/*
		 * If the current thread is the inheritor:
		 *
		 * If we set the AST, then the thread will stay the inheritor until
		 * either the AST calls workq_kern_threadreq_redrive(), or it parks
		 * and calls workq_push_idle_thread().
		 *
		 * Else, the responsibility of the thread creation is with a thread-call
		 * and we need to clear the inheritor.
		 */
		if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
		    wq->wq_inheritor == current_thread()) {
			workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
		}
	}
}
4256*19c3b8c2SApple OSS Distributions
4257*19c3b8c2SApple OSS Distributions /**
4258*19c3b8c2SApple OSS Distributions * Same as workq_unpark_select_threadreq_or_park_and_unlock,
4259*19c3b8c2SApple OSS Distributions * but do not allow early binds.
4260*19c3b8c2SApple OSS Distributions *
4261*19c3b8c2SApple OSS Distributions * Called with the base pri frozen, will unfreeze it.
4262*19c3b8c2SApple OSS Distributions */
4263*19c3b8c2SApple OSS Distributions __attribute__((noreturn, noinline))
4264*19c3b8c2SApple OSS Distributions static void
workq_select_threadreq_or_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)4265*19c3b8c2SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
4266*19c3b8c2SApple OSS Distributions struct uthread *uth, uint32_t setup_flags)
4267*19c3b8c2SApple OSS Distributions {
4268*19c3b8c2SApple OSS Distributions workq_threadreq_t req = NULL;
4269*19c3b8c2SApple OSS Distributions bool is_creator = (wq->wq_creator == uth);
4270*19c3b8c2SApple OSS Distributions bool schedule_creator = false;
4271*19c3b8c2SApple OSS Distributions
4272*19c3b8c2SApple OSS Distributions if (__improbable(_wq_exiting(wq))) {
4273*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0);
4274*19c3b8c2SApple OSS Distributions goto park;
4275*19c3b8c2SApple OSS Distributions }
4276*19c3b8c2SApple OSS Distributions
4277*19c3b8c2SApple OSS Distributions if (wq->wq_reqcount == 0) {
4278*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0);
4279*19c3b8c2SApple OSS Distributions goto park;
4280*19c3b8c2SApple OSS Distributions }
4281*19c3b8c2SApple OSS Distributions
4282*19c3b8c2SApple OSS Distributions req = workq_threadreq_select(wq, uth);
4283*19c3b8c2SApple OSS Distributions if (__improbable(req == NULL)) {
4284*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0);
4285*19c3b8c2SApple OSS Distributions goto park;
4286*19c3b8c2SApple OSS Distributions }
4287*19c3b8c2SApple OSS Distributions
4288*19c3b8c2SApple OSS Distributions struct uu_workq_policy old_pri = uth->uu_workq_pri;
4289*19c3b8c2SApple OSS Distributions uint8_t tr_flags = req->tr_flags;
4290*19c3b8c2SApple OSS Distributions struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);
4291*19c3b8c2SApple OSS Distributions
4292*19c3b8c2SApple OSS Distributions /*
4293*19c3b8c2SApple OSS Distributions * Attempt to setup ourselves as the new thing to run, moving all priority
4294*19c3b8c2SApple OSS Distributions * pushes to ourselves.
4295*19c3b8c2SApple OSS Distributions *
4296*19c3b8c2SApple OSS Distributions * If the current thread is the creator, then the fact that we are presently
4297*19c3b8c2SApple OSS Distributions * running is proof that we'll do something useful, so keep going.
4298*19c3b8c2SApple OSS Distributions *
4299*19c3b8c2SApple OSS Distributions * For other cases, peek at the AST to know whether the scheduler wants
4300*19c3b8c2SApple OSS Distributions * to preempt us, if yes, park instead, and move the thread request
4301*19c3b8c2SApple OSS Distributions * turnstile back to the workqueue.
4302*19c3b8c2SApple OSS Distributions */
4303*19c3b8c2SApple OSS Distributions if (req_ts) {
4304*19c3b8c2SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
4305*19c3b8c2SApple OSS Distributions turnstile_update_inheritor(req_ts, get_machthread(uth),
4306*19c3b8c2SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
4307*19c3b8c2SApple OSS Distributions turnstile_update_inheritor_complete(req_ts,
4308*19c3b8c2SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
4309*19c3b8c2SApple OSS Distributions });
4310*19c3b8c2SApple OSS Distributions }
4311*19c3b8c2SApple OSS Distributions
4312*19c3b8c2SApple OSS Distributions /* accounting changes of aggregate thscheduled_count and thactive which has
4313*19c3b8c2SApple OSS Distributions * to be paired with the workq_thread_reset_pri below so that we have
4314*19c3b8c2SApple OSS Distributions * uth->uu_workq_pri match with thactive.
4315*19c3b8c2SApple OSS Distributions *
4316*19c3b8c2SApple OSS Distributions * This is undone when the thread parks */
4317*19c3b8c2SApple OSS Distributions if (is_creator) {
4318*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
4319*19c3b8c2SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields);
4320*19c3b8c2SApple OSS Distributions wq->wq_creator = NULL;
4321*19c3b8c2SApple OSS Distributions _wq_thactive_inc(wq, req->tr_qos);
4322*19c3b8c2SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
4323*19c3b8c2SApple OSS Distributions } else if (old_pri.qos_bucket != req->tr_qos) {
4324*19c3b8c2SApple OSS Distributions _wq_thactive_move(wq, old_pri.qos_bucket, req->tr_qos);
4325*19c3b8c2SApple OSS Distributions }
4326*19c3b8c2SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4327*19c3b8c2SApple OSS Distributions
4328*19c3b8c2SApple OSS Distributions /*
4329*19c3b8c2SApple OSS Distributions * Make relevant accounting changes for pool specific counts.
4330*19c3b8c2SApple OSS Distributions *
4331*19c3b8c2SApple OSS Distributions * The schedule counts changing can affect what the next best request
4332*19c3b8c2SApple OSS Distributions * for cooperative thread pool is if this request is dequeued.
4333*19c3b8c2SApple OSS Distributions */
4334*19c3b8c2SApple OSS Distributions bool cooperative_sched_count_changed =
4335*19c3b8c2SApple OSS Distributions workq_adjust_cooperative_constrained_schedule_counts(wq, uth,
4336*19c3b8c2SApple OSS Distributions old_pri.qos_req, tr_flags);
4337*19c3b8c2SApple OSS Distributions
4338*19c3b8c2SApple OSS Distributions if (workq_tr_is_overcommit(tr_flags)) {
4339*19c3b8c2SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
4340*19c3b8c2SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4341*19c3b8c2SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_COOPERATIVE);
4342*19c3b8c2SApple OSS Distributions } else {
4343*19c3b8c2SApple OSS Distributions workq_thread_set_type(uth, 0);
4344*19c3b8c2SApple OSS Distributions }
4345*19c3b8c2SApple OSS Distributions
4346*19c3b8c2SApple OSS Distributions if (__improbable(thread_unfreeze_base_pri(get_machthread(uth)) && !is_creator)) {
4347*19c3b8c2SApple OSS Distributions if (req_ts) {
4348*19c3b8c2SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
4349*19c3b8c2SApple OSS Distributions turnstile_update_inheritor(req_ts, wq->wq_turnstile,
4350*19c3b8c2SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
4351*19c3b8c2SApple OSS Distributions turnstile_update_inheritor_complete(req_ts,
4352*19c3b8c2SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
4353*19c3b8c2SApple OSS Distributions });
4354*19c3b8c2SApple OSS Distributions }
4355*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0);
4356*19c3b8c2SApple OSS Distributions goto park_thawed;
4357*19c3b8c2SApple OSS Distributions }
4358*19c3b8c2SApple OSS Distributions
4359*19c3b8c2SApple OSS Distributions /*
4360*19c3b8c2SApple OSS Distributions * We passed all checks, dequeue the request, bind to it, and set it up
4361*19c3b8c2SApple OSS Distributions * to return to user.
4362*19c3b8c2SApple OSS Distributions */
4363*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
4364*19c3b8c2SApple OSS Distributions workq_trace_req_id(req), tr_flags, 0);
4365*19c3b8c2SApple OSS Distributions wq->wq_fulfilled++;
4366*19c3b8c2SApple OSS Distributions schedule_creator = workq_threadreq_dequeue(wq, req,
4367*19c3b8c2SApple OSS Distributions cooperative_sched_count_changed);
4368*19c3b8c2SApple OSS Distributions
4369*19c3b8c2SApple OSS Distributions workq_thread_reset_cpupercent(req, uth);
4370*19c3b8c2SApple OSS Distributions
4371*19c3b8c2SApple OSS Distributions if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4372*19c3b8c2SApple OSS Distributions kqueue_threadreq_bind_prepost(p, req, uth);
4373*19c3b8c2SApple OSS Distributions req = NULL;
4374*19c3b8c2SApple OSS Distributions } else if (req->tr_count > 0) {
4375*19c3b8c2SApple OSS Distributions req = NULL;
4376*19c3b8c2SApple OSS Distributions }
4377*19c3b8c2SApple OSS Distributions
4378*19c3b8c2SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_NEW) {
4379*19c3b8c2SApple OSS Distributions uth->uu_workq_flags ^= UT_WORKQ_NEW;
4380*19c3b8c2SApple OSS Distributions setup_flags |= WQ_SETUP_FIRST_USE;
4381*19c3b8c2SApple OSS Distributions }
4382*19c3b8c2SApple OSS Distributions
4383*19c3b8c2SApple OSS Distributions /* If one of the following is true, call workq_schedule_creator (which also
4384*19c3b8c2SApple OSS Distributions * adjusts priority of existing creator):
4385*19c3b8c2SApple OSS Distributions *
4386*19c3b8c2SApple OSS Distributions * - We are the creator currently so the wq may need a new creator
4387*19c3b8c2SApple OSS Distributions * - The request we're binding to is the highest priority one, existing
4388*19c3b8c2SApple OSS Distributions * creator's priority might need to be adjusted to reflect the next
4389*19c3b8c2SApple OSS Distributions * highest TR
4390*19c3b8c2SApple OSS Distributions */
4391*19c3b8c2SApple OSS Distributions if (is_creator || schedule_creator) {
4392*19c3b8c2SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
4393*19c3b8c2SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
4394*19c3b8c2SApple OSS Distributions }
4395*19c3b8c2SApple OSS Distributions
4396*19c3b8c2SApple OSS Distributions workq_unlock(wq);
4397*19c3b8c2SApple OSS Distributions
4398*19c3b8c2SApple OSS Distributions if (req) {
4399*19c3b8c2SApple OSS Distributions zfree(workq_zone_threadreq, req);
4400*19c3b8c2SApple OSS Distributions }
4401*19c3b8c2SApple OSS Distributions
4402*19c3b8c2SApple OSS Distributions /*
4403*19c3b8c2SApple OSS Distributions * Run Thread, Run!
4404*19c3b8c2SApple OSS Distributions */
4405*19c3b8c2SApple OSS Distributions uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
4406*19c3b8c2SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
4407*19c3b8c2SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
4408*19c3b8c2SApple OSS Distributions } else if (workq_tr_is_overcommit(tr_flags)) {
4409*19c3b8c2SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
4410*19c3b8c2SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4411*19c3b8c2SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
4412*19c3b8c2SApple OSS Distributions }
4413*19c3b8c2SApple OSS Distributions if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
4414*19c3b8c2SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_KEVENT;
4415*19c3b8c2SApple OSS Distributions assert((upcall_flags & WQ_FLAG_THREAD_COOPERATIVE) == 0);
4416*19c3b8c2SApple OSS Distributions }
4417*19c3b8c2SApple OSS Distributions
4418*19c3b8c2SApple OSS Distributions if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
4419*19c3b8c2SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
4420*19c3b8c2SApple OSS Distributions }
4421*19c3b8c2SApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
4422*19c3b8c2SApple OSS Distributions
4423*19c3b8c2SApple OSS Distributions if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4424*19c3b8c2SApple OSS Distributions kqueue_threadreq_bind_commit(p, get_machthread(uth));
4425*19c3b8c2SApple OSS Distributions } else {
4426*19c3b8c2SApple OSS Distributions #if CONFIG_PREADOPT_TG
4427*19c3b8c2SApple OSS Distributions /*
4428*19c3b8c2SApple OSS Distributions * The thread may have a preadopt thread group on it already because it
4429*19c3b8c2SApple OSS Distributions * got tagged with it as a creator thread. So we need to make sure to
4430*19c3b8c2SApple OSS Distributions * clear that since we don't have preadoption for anonymous thread
4431*19c3b8c2SApple OSS Distributions * requests
4432*19c3b8c2SApple OSS Distributions */
4433*19c3b8c2SApple OSS Distributions thread_set_preadopt_thread_group(get_machthread(uth), NULL);
4434*19c3b8c2SApple OSS Distributions #endif
4435*19c3b8c2SApple OSS Distributions }
4436*19c3b8c2SApple OSS Distributions
4437*19c3b8c2SApple OSS Distributions workq_setup_and_run(p, uth, setup_flags);
4438*19c3b8c2SApple OSS Distributions __builtin_unreachable();
4439*19c3b8c2SApple OSS Distributions
4440*19c3b8c2SApple OSS Distributions park:
4441*19c3b8c2SApple OSS Distributions thread_unfreeze_base_pri(get_machthread(uth));
4442*19c3b8c2SApple OSS Distributions park_thawed:
4443*19c3b8c2SApple OSS Distributions workq_park_and_unlock(p, wq, uth, setup_flags);
4444*19c3b8c2SApple OSS Distributions }
4445*19c3b8c2SApple OSS Distributions
4446*19c3b8c2SApple OSS Distributions /**
4447*19c3b8c2SApple OSS Distributions * Runs a thread request on a thread
4448*19c3b8c2SApple OSS Distributions *
4449*19c3b8c2SApple OSS Distributions * - if thread is THREAD_NULL, will find a thread and run the request there.
4450*19c3b8c2SApple OSS Distributions * Otherwise, the thread must be the current thread.
4451*19c3b8c2SApple OSS Distributions *
4452*19c3b8c2SApple OSS Distributions * - if req is NULL, will find the highest priority request and run that. If
4453*19c3b8c2SApple OSS Distributions * it is not NULL, it must be a threadreq object in state NEW. If it can not
4454*19c3b8c2SApple OSS Distributions * be run immediately, it will be enqueued and moved to state QUEUED.
4455*19c3b8c2SApple OSS Distributions *
4456*19c3b8c2SApple OSS Distributions * Either way, the thread request object serviced will be moved to state
4457*19c3b8c2SApple OSS Distributions * BINDING and attached to the uthread.
4458*19c3b8c2SApple OSS Distributions *
4459*19c3b8c2SApple OSS Distributions * Should be called with the workqueue lock held. Will drop it.
4460*19c3b8c2SApple OSS Distributions * Should be called with the base pri not frozen.
4461*19c3b8c2SApple OSS Distributions */
4462*19c3b8c2SApple OSS Distributions __attribute__((noreturn, noinline))
4463*19c3b8c2SApple OSS Distributions static void
workq_unpark_select_threadreq_or_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)4464*19c3b8c2SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
4465*19c3b8c2SApple OSS Distributions struct uthread *uth, uint32_t setup_flags)
4466*19c3b8c2SApple OSS Distributions {
4467*19c3b8c2SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_EARLY_BOUND) {
4468*19c3b8c2SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_NEW) {
4469*19c3b8c2SApple OSS Distributions setup_flags |= WQ_SETUP_FIRST_USE;
4470*19c3b8c2SApple OSS Distributions }
4471*19c3b8c2SApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_NEW | UT_WORKQ_EARLY_BOUND);
4472*19c3b8c2SApple OSS Distributions /*
4473*19c3b8c2SApple OSS Distributions * This pointer is possibly freed and only used for tracing purposes.
4474*19c3b8c2SApple OSS Distributions */
4475*19c3b8c2SApple OSS Distributions workq_threadreq_t req = uth->uu_save.uus_workq_park_data.thread_request;
4476*19c3b8c2SApple OSS Distributions workq_unlock(wq);
4477*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
4478*19c3b8c2SApple OSS Distributions VM_KERNEL_ADDRHIDE(req), 0, 0);
4479*19c3b8c2SApple OSS Distributions (void)req;
4480*19c3b8c2SApple OSS Distributions
4481*19c3b8c2SApple OSS Distributions workq_setup_and_run(p, uth, setup_flags);
4482*19c3b8c2SApple OSS Distributions __builtin_unreachable();
4483*19c3b8c2SApple OSS Distributions }
4484*19c3b8c2SApple OSS Distributions
4485*19c3b8c2SApple OSS Distributions thread_freeze_base_pri(get_machthread(uth));
4486*19c3b8c2SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
4487*19c3b8c2SApple OSS Distributions }
4488*19c3b8c2SApple OSS Distributions
4489*19c3b8c2SApple OSS Distributions static bool
workq_creator_should_yield(struct workqueue * wq,struct uthread * uth)4490*19c3b8c2SApple OSS Distributions workq_creator_should_yield(struct workqueue *wq, struct uthread *uth)
4491*19c3b8c2SApple OSS Distributions {
4492*19c3b8c2SApple OSS Distributions thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
4493*19c3b8c2SApple OSS Distributions
4494*19c3b8c2SApple OSS Distributions if (qos >= THREAD_QOS_USER_INTERACTIVE) {
4495*19c3b8c2SApple OSS Distributions return false;
4496*19c3b8c2SApple OSS Distributions }
4497*19c3b8c2SApple OSS Distributions
4498*19c3b8c2SApple OSS Distributions uint32_t snapshot = uth->uu_save.uus_workq_park_data.fulfilled_snapshot;
4499*19c3b8c2SApple OSS Distributions if (wq->wq_fulfilled == snapshot) {
4500*19c3b8c2SApple OSS Distributions return false;
4501*19c3b8c2SApple OSS Distributions }
4502*19c3b8c2SApple OSS Distributions
4503*19c3b8c2SApple OSS Distributions uint32_t cnt = 0, conc = wq_max_parallelism[_wq_bucket(qos)];
4504*19c3b8c2SApple OSS Distributions if (wq->wq_fulfilled - snapshot > conc) {
4505*19c3b8c2SApple OSS Distributions /* we fulfilled more than NCPU requests since being dispatched */
4506*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1,
4507*19c3b8c2SApple OSS Distributions wq->wq_fulfilled, snapshot);
4508*19c3b8c2SApple OSS Distributions return true;
4509*19c3b8c2SApple OSS Distributions }
4510*19c3b8c2SApple OSS Distributions
4511*19c3b8c2SApple OSS Distributions for (uint8_t i = _wq_bucket(qos); i < WORKQ_NUM_QOS_BUCKETS; i++) {
4512*19c3b8c2SApple OSS Distributions cnt += wq->wq_thscheduled_count[i];
4513*19c3b8c2SApple OSS Distributions }
4514*19c3b8c2SApple OSS Distributions if (conc <= cnt) {
4515*19c3b8c2SApple OSS Distributions /* We fulfilled requests and have more than NCPU scheduled threads */
4516*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2,
4517*19c3b8c2SApple OSS Distributions wq->wq_fulfilled, snapshot);
4518*19c3b8c2SApple OSS Distributions return true;
4519*19c3b8c2SApple OSS Distributions }
4520*19c3b8c2SApple OSS Distributions
4521*19c3b8c2SApple OSS Distributions return false;
4522*19c3b8c2SApple OSS Distributions }
4523*19c3b8c2SApple OSS Distributions
4524*19c3b8c2SApple OSS Distributions /**
4525*19c3b8c2SApple OSS Distributions * parked thread wakes up
4526*19c3b8c2SApple OSS Distributions */
4527*19c3b8c2SApple OSS Distributions __attribute__((noreturn, noinline))
4528*19c3b8c2SApple OSS Distributions static void
workq_unpark_continue(void * parameter __unused,wait_result_t wr __unused)4529*19c3b8c2SApple OSS Distributions workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused)
4530*19c3b8c2SApple OSS Distributions {
4531*19c3b8c2SApple OSS Distributions thread_t th = current_thread();
4532*19c3b8c2SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
4533*19c3b8c2SApple OSS Distributions proc_t p = current_proc();
4534*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
4535*19c3b8c2SApple OSS Distributions
4536*19c3b8c2SApple OSS Distributions workq_lock_spin(wq);
4537*19c3b8c2SApple OSS Distributions
4538*19c3b8c2SApple OSS Distributions if (wq->wq_creator == uth && workq_creator_should_yield(wq, uth)) {
4539*19c3b8c2SApple OSS Distributions /*
4540*19c3b8c2SApple OSS Distributions * If the number of threads we have out are able to keep up with the
4541*19c3b8c2SApple OSS Distributions * demand, then we should avoid sending this creator thread to
4542*19c3b8c2SApple OSS Distributions * userspace.
4543*19c3b8c2SApple OSS Distributions */
4544*19c3b8c2SApple OSS Distributions uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
4545*19c3b8c2SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields++;
4546*19c3b8c2SApple OSS Distributions workq_unlock(wq);
4547*19c3b8c2SApple OSS Distributions thread_yield_with_continuation(workq_unpark_continue, NULL);
4548*19c3b8c2SApple OSS Distributions __builtin_unreachable();
4549*19c3b8c2SApple OSS Distributions }
4550*19c3b8c2SApple OSS Distributions
4551*19c3b8c2SApple OSS Distributions if (__probable(uth->uu_workq_flags & UT_WORKQ_RUNNING)) {
4552*19c3b8c2SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, WQ_SETUP_NONE);
4553*19c3b8c2SApple OSS Distributions __builtin_unreachable();
4554*19c3b8c2SApple OSS Distributions }
4555*19c3b8c2SApple OSS Distributions
4556*19c3b8c2SApple OSS Distributions if (__probable(wr == THREAD_AWAKENED)) {
4557*19c3b8c2SApple OSS Distributions /*
4558*19c3b8c2SApple OSS Distributions * We were set running, but for the purposes of dying.
4559*19c3b8c2SApple OSS Distributions */
4560*19c3b8c2SApple OSS Distributions assert(uth->uu_workq_flags & UT_WORKQ_DYING);
4561*19c3b8c2SApple OSS Distributions assert((uth->uu_workq_flags & UT_WORKQ_NEW) == 0);
4562*19c3b8c2SApple OSS Distributions } else {
4563*19c3b8c2SApple OSS Distributions /*
4564*19c3b8c2SApple OSS Distributions * workaround for <rdar://problem/38647347>,
4565*19c3b8c2SApple OSS Distributions * in case we do hit userspace, make sure calling
4566*19c3b8c2SApple OSS Distributions * workq_thread_terminate() does the right thing here,
4567*19c3b8c2SApple OSS Distributions * and if we never call it, that workq_exit() will too because it sees
4568*19c3b8c2SApple OSS Distributions * this thread on the runlist.
4569*19c3b8c2SApple OSS Distributions */
4570*19c3b8c2SApple OSS Distributions assert(wr == THREAD_INTERRUPTED);
4571*19c3b8c2SApple OSS Distributions wq->wq_thdying_count++;
4572*19c3b8c2SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_DYING;
4573*19c3b8c2SApple OSS Distributions }
4574*19c3b8c2SApple OSS Distributions
4575*19c3b8c2SApple OSS Distributions workq_unpark_for_death_and_unlock(p, wq, uth,
4576*19c3b8c2SApple OSS Distributions WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, WQ_SETUP_NONE);
4577*19c3b8c2SApple OSS Distributions __builtin_unreachable();
4578*19c3b8c2SApple OSS Distributions }
4579*19c3b8c2SApple OSS Distributions
4580*19c3b8c2SApple OSS Distributions __attribute__((noreturn, noinline))
4581*19c3b8c2SApple OSS Distributions static void
workq_setup_and_run(proc_t p,struct uthread * uth,int setup_flags)4582*19c3b8c2SApple OSS Distributions workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags)
4583*19c3b8c2SApple OSS Distributions {
4584*19c3b8c2SApple OSS Distributions thread_t th = get_machthread(uth);
4585*19c3b8c2SApple OSS Distributions vm_map_t vmap = get_task_map(proc_task(p));
4586*19c3b8c2SApple OSS Distributions
4587*19c3b8c2SApple OSS Distributions if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
4588*19c3b8c2SApple OSS Distributions /*
4589*19c3b8c2SApple OSS Distributions * For preemption reasons, we want to reset the voucher as late as
4590*19c3b8c2SApple OSS Distributions * possible, so we do it in two places:
4591*19c3b8c2SApple OSS Distributions * - Just before parking (i.e. in workq_park_and_unlock())
4592*19c3b8c2SApple OSS Distributions * - Prior to doing the setup for the next workitem (i.e. here)
4593*19c3b8c2SApple OSS Distributions *
4594*19c3b8c2SApple OSS Distributions * Those two places are sufficient to ensure we always reset it before
4595*19c3b8c2SApple OSS Distributions * it goes back out to user space, but be careful to not break that
4596*19c3b8c2SApple OSS Distributions * guarantee.
4597*19c3b8c2SApple OSS Distributions *
4598*19c3b8c2SApple OSS Distributions * Note that setting the voucher to NULL will not clear the preadoption
4599*19c3b8c2SApple OSS Distributions * thread group on this thread
4600*19c3b8c2SApple OSS Distributions */
4601*19c3b8c2SApple OSS Distributions __assert_only kern_return_t kr;
4602*19c3b8c2SApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
4603*19c3b8c2SApple OSS Distributions assert(kr == KERN_SUCCESS);
4604*19c3b8c2SApple OSS Distributions }
4605*19c3b8c2SApple OSS Distributions
4606*19c3b8c2SApple OSS Distributions uint32_t upcall_flags = uth->uu_save.uus_workq_park_data.upcall_flags;
4607*19c3b8c2SApple OSS Distributions if (!(setup_flags & WQ_SETUP_FIRST_USE)) {
4608*19c3b8c2SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_REUSE;
4609*19c3b8c2SApple OSS Distributions }
4610*19c3b8c2SApple OSS Distributions
4611*19c3b8c2SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
4612*19c3b8c2SApple OSS Distributions /*
4613*19c3b8c2SApple OSS Distributions * For threads that have an outside-of-QoS thread priority, indicate
4614*19c3b8c2SApple OSS Distributions * to userspace that setting QoS should only affect the TSD and not
4615*19c3b8c2SApple OSS Distributions * change QOS in the kernel.
4616*19c3b8c2SApple OSS Distributions */
4617*19c3b8c2SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
4618*19c3b8c2SApple OSS Distributions } else {
4619*19c3b8c2SApple OSS Distributions /*
4620*19c3b8c2SApple OSS Distributions * Put the QoS class value into the lower bits of the reuse_thread
4621*19c3b8c2SApple OSS Distributions * register, this is where the thread priority used to be stored
4622*19c3b8c2SApple OSS Distributions * anyway.
4623*19c3b8c2SApple OSS Distributions */
4624*19c3b8c2SApple OSS Distributions upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
4625*19c3b8c2SApple OSS Distributions WQ_FLAG_THREAD_PRIO_QOS;
4626*19c3b8c2SApple OSS Distributions }
4627*19c3b8c2SApple OSS Distributions
4628*19c3b8c2SApple OSS Distributions if (uth->uu_workq_thport == MACH_PORT_NULL) {
4629*19c3b8c2SApple OSS Distributions /* convert_thread_to_port_pinned() consumes a reference */
4630*19c3b8c2SApple OSS Distributions thread_reference(th);
4631*19c3b8c2SApple OSS Distributions /* Convert to immovable/pinned thread port, but port is not pinned yet */
4632*19c3b8c2SApple OSS Distributions ipc_port_t port = convert_thread_to_port_pinned(th);
4633*19c3b8c2SApple OSS Distributions /* Atomically, pin and copy out the port */
4634*19c3b8c2SApple OSS Distributions uth->uu_workq_thport = ipc_port_copyout_send_pinned(port, get_task_ipcspace(proc_task(p)));
4635*19c3b8c2SApple OSS Distributions }
4636*19c3b8c2SApple OSS Distributions
4637*19c3b8c2SApple OSS Distributions /* Thread has been set up to run, arm its next workqueue quantum or disarm
4638*19c3b8c2SApple OSS Distributions * if it is no longer supporting that */
4639*19c3b8c2SApple OSS Distributions if (thread_supports_cooperative_workqueue(th)) {
4640*19c3b8c2SApple OSS Distributions thread_arm_workqueue_quantum(th);
4641*19c3b8c2SApple OSS Distributions } else {
4642*19c3b8c2SApple OSS Distributions thread_disarm_workqueue_quantum(th);
4643*19c3b8c2SApple OSS Distributions }
4644*19c3b8c2SApple OSS Distributions
4645*19c3b8c2SApple OSS Distributions /*
4646*19c3b8c2SApple OSS Distributions * Call out to pthread, this sets up the thread, pulls in kevent structs
4647*19c3b8c2SApple OSS Distributions * onto the stack, sets up the thread state and then returns to userspace.
4648*19c3b8c2SApple OSS Distributions */
4649*19c3b8c2SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START,
4650*19c3b8c2SApple OSS Distributions proc_get_wqptr_fast(p), 0, 0, 0);
4651*19c3b8c2SApple OSS Distributions
4652*19c3b8c2SApple OSS Distributions if (workq_thread_is_cooperative(uth)) {
4653*19c3b8c2SApple OSS Distributions thread_sched_call(th, NULL);
4654*19c3b8c2SApple OSS Distributions } else {
4655*19c3b8c2SApple OSS Distributions thread_sched_call(th, workq_sched_callback);
4656*19c3b8c2SApple OSS Distributions }
4657*19c3b8c2SApple OSS Distributions
4658*19c3b8c2SApple OSS Distributions pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
4659*19c3b8c2SApple OSS Distributions uth->uu_workq_thport, 0, setup_flags, upcall_flags);
4660*19c3b8c2SApple OSS Distributions
4661*19c3b8c2SApple OSS Distributions __builtin_unreachable();
4662*19c3b8c2SApple OSS Distributions }
4663*19c3b8c2SApple OSS Distributions
4664*19c3b8c2SApple OSS Distributions #pragma mark misc
4665*19c3b8c2SApple OSS Distributions
4666*19c3b8c2SApple OSS Distributions int
fill_procworkqueue(proc_t p,struct proc_workqueueinfo * pwqinfo)4667*19c3b8c2SApple OSS Distributions fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
4668*19c3b8c2SApple OSS Distributions {
4669*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
4670*19c3b8c2SApple OSS Distributions int error = 0;
4671*19c3b8c2SApple OSS Distributions int activecount;
4672*19c3b8c2SApple OSS Distributions
4673*19c3b8c2SApple OSS Distributions if (wq == NULL) {
4674*19c3b8c2SApple OSS Distributions return EINVAL;
4675*19c3b8c2SApple OSS Distributions }
4676*19c3b8c2SApple OSS Distributions
4677*19c3b8c2SApple OSS Distributions /*
4678*19c3b8c2SApple OSS Distributions * This is sometimes called from interrupt context by the kperf sampler.
4679*19c3b8c2SApple OSS Distributions * In that case, it's not safe to spin trying to take the lock since we
4680*19c3b8c2SApple OSS Distributions * might already hold it. So, we just try-lock it and error out if it's
4681*19c3b8c2SApple OSS Distributions * already held. Since this is just a debugging aid, and all our callers
4682*19c3b8c2SApple OSS Distributions * are able to handle an error, that's fine.
4683*19c3b8c2SApple OSS Distributions */
4684*19c3b8c2SApple OSS Distributions bool locked = workq_lock_try(wq);
4685*19c3b8c2SApple OSS Distributions if (!locked) {
4686*19c3b8c2SApple OSS Distributions return EBUSY;
4687*19c3b8c2SApple OSS Distributions }
4688*19c3b8c2SApple OSS Distributions
4689*19c3b8c2SApple OSS Distributions wq_thactive_t act = _wq_thactive(wq);
4690*19c3b8c2SApple OSS Distributions activecount = _wq_thactive_aggregate_downto_qos(wq, act,
4691*19c3b8c2SApple OSS Distributions WORKQ_THREAD_QOS_MIN, NULL, NULL);
4692*19c3b8c2SApple OSS Distributions if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) {
4693*19c3b8c2SApple OSS Distributions activecount++;
4694*19c3b8c2SApple OSS Distributions }
4695*19c3b8c2SApple OSS Distributions pwqinfo->pwq_nthreads = wq->wq_nthreads;
4696*19c3b8c2SApple OSS Distributions pwqinfo->pwq_runthreads = activecount;
4697*19c3b8c2SApple OSS Distributions pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
4698*19c3b8c2SApple OSS Distributions pwqinfo->pwq_state = 0;
4699*19c3b8c2SApple OSS Distributions
4700*19c3b8c2SApple OSS Distributions if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
4701*19c3b8c2SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
4702*19c3b8c2SApple OSS Distributions }
4703*19c3b8c2SApple OSS Distributions
4704*19c3b8c2SApple OSS Distributions if (wq->wq_nthreads >= wq_max_threads) {
4705*19c3b8c2SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
4706*19c3b8c2SApple OSS Distributions }
4707*19c3b8c2SApple OSS Distributions
4708*19c3b8c2SApple OSS Distributions workq_unlock(wq);
4709*19c3b8c2SApple OSS Distributions return error;
4710*19c3b8c2SApple OSS Distributions }
4711*19c3b8c2SApple OSS Distributions
4712*19c3b8c2SApple OSS Distributions boolean_t
workqueue_get_pwq_exceeded(void * v,boolean_t * exceeded_total,boolean_t * exceeded_constrained)4713*19c3b8c2SApple OSS Distributions workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
4714*19c3b8c2SApple OSS Distributions boolean_t *exceeded_constrained)
4715*19c3b8c2SApple OSS Distributions {
4716*19c3b8c2SApple OSS Distributions proc_t p = v;
4717*19c3b8c2SApple OSS Distributions struct proc_workqueueinfo pwqinfo;
4718*19c3b8c2SApple OSS Distributions int err;
4719*19c3b8c2SApple OSS Distributions
4720*19c3b8c2SApple OSS Distributions assert(p != NULL);
4721*19c3b8c2SApple OSS Distributions assert(exceeded_total != NULL);
4722*19c3b8c2SApple OSS Distributions assert(exceeded_constrained != NULL);
4723*19c3b8c2SApple OSS Distributions
4724*19c3b8c2SApple OSS Distributions err = fill_procworkqueue(p, &pwqinfo);
4725*19c3b8c2SApple OSS Distributions if (err) {
4726*19c3b8c2SApple OSS Distributions return FALSE;
4727*19c3b8c2SApple OSS Distributions }
4728*19c3b8c2SApple OSS Distributions if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
4729*19c3b8c2SApple OSS Distributions return FALSE;
4730*19c3b8c2SApple OSS Distributions }
4731*19c3b8c2SApple OSS Distributions
4732*19c3b8c2SApple OSS Distributions *exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
4733*19c3b8c2SApple OSS Distributions *exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);
4734*19c3b8c2SApple OSS Distributions
4735*19c3b8c2SApple OSS Distributions return TRUE;
4736*19c3b8c2SApple OSS Distributions }
4737*19c3b8c2SApple OSS Distributions
4738*19c3b8c2SApple OSS Distributions uint32_t
workqueue_get_pwq_state_kdp(void * v)4739*19c3b8c2SApple OSS Distributions workqueue_get_pwq_state_kdp(void * v)
4740*19c3b8c2SApple OSS Distributions {
4741*19c3b8c2SApple OSS Distributions static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) ==
4742*19c3b8c2SApple OSS Distributions kTaskWqExceededConstrainedThreadLimit);
4743*19c3b8c2SApple OSS Distributions static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) ==
4744*19c3b8c2SApple OSS Distributions kTaskWqExceededTotalThreadLimit);
4745*19c3b8c2SApple OSS Distributions static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
4746*19c3b8c2SApple OSS Distributions static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT |
4747*19c3b8c2SApple OSS Distributions WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT) == 0x7);
4748*19c3b8c2SApple OSS Distributions
4749*19c3b8c2SApple OSS Distributions if (v == NULL) {
4750*19c3b8c2SApple OSS Distributions return 0;
4751*19c3b8c2SApple OSS Distributions }
4752*19c3b8c2SApple OSS Distributions
4753*19c3b8c2SApple OSS Distributions proc_t p = v;
4754*19c3b8c2SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
4755*19c3b8c2SApple OSS Distributions
4756*19c3b8c2SApple OSS Distributions if (wq == NULL || workq_lock_is_acquired_kdp(wq)) {
4757*19c3b8c2SApple OSS Distributions return 0;
4758*19c3b8c2SApple OSS Distributions }
4759*19c3b8c2SApple OSS Distributions
4760*19c3b8c2SApple OSS Distributions uint32_t pwq_state = WQ_FLAGS_AVAILABLE;
4761*19c3b8c2SApple OSS Distributions
4762*19c3b8c2SApple OSS Distributions if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
4763*19c3b8c2SApple OSS Distributions pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
4764*19c3b8c2SApple OSS Distributions }
4765*19c3b8c2SApple OSS Distributions
4766*19c3b8c2SApple OSS Distributions if (wq->wq_nthreads >= wq_max_threads) {
4767*19c3b8c2SApple OSS Distributions pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
4768*19c3b8c2SApple OSS Distributions }
4769*19c3b8c2SApple OSS Distributions
4770*19c3b8c2SApple OSS Distributions return pwq_state;
4771*19c3b8c2SApple OSS Distributions }
4772*19c3b8c2SApple OSS Distributions
4773*19c3b8c2SApple OSS Distributions void
workq_init(void)4774*19c3b8c2SApple OSS Distributions workq_init(void)
4775*19c3b8c2SApple OSS Distributions {
4776*19c3b8c2SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
4777*19c3b8c2SApple OSS Distributions NSEC_PER_USEC, &wq_stalled_window.abstime);
4778*19c3b8c2SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,
4779*19c3b8c2SApple OSS Distributions NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
4780*19c3b8c2SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs,
4781*19c3b8c2SApple OSS Distributions NSEC_PER_USEC, &wq_max_timer_interval.abstime);
4782*19c3b8c2SApple OSS Distributions
4783*19c3b8c2SApple OSS Distributions thread_deallocate_daemon_register_queue(&workq_deallocate_queue,
4784*19c3b8c2SApple OSS Distributions workq_deallocate_queue_invoke);
4785*19c3b8c2SApple OSS Distributions }
4786