1*4f1223e8SApple OSS Distributions /*
2*4f1223e8SApple OSS Distributions * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3*4f1223e8SApple OSS Distributions *
4*4f1223e8SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5*4f1223e8SApple OSS Distributions *
6*4f1223e8SApple OSS Distributions * This file contains Original Code and/or Modifications of Original Code
7*4f1223e8SApple OSS Distributions * as defined in and that are subject to the Apple Public Source License
8*4f1223e8SApple OSS Distributions * Version 2.0 (the 'License'). You may not use this file except in
9*4f1223e8SApple OSS Distributions * compliance with the License. The rights granted to you under the License
10*4f1223e8SApple OSS Distributions * may not be used to create, or enable the creation or redistribution of,
11*4f1223e8SApple OSS Distributions * unlawful or unlicensed copies of an Apple operating system, or to
12*4f1223e8SApple OSS Distributions * circumvent, violate, or enable the circumvention or violation of, any
13*4f1223e8SApple OSS Distributions * terms of an Apple operating system software license agreement.
14*4f1223e8SApple OSS Distributions *
15*4f1223e8SApple OSS Distributions * Please obtain a copy of the License at
16*4f1223e8SApple OSS Distributions * http://www.opensource.apple.com/apsl/ and read it before using this file.
17*4f1223e8SApple OSS Distributions *
18*4f1223e8SApple OSS Distributions * The Original Code and all software distributed under the License are
19*4f1223e8SApple OSS Distributions * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20*4f1223e8SApple OSS Distributions * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21*4f1223e8SApple OSS Distributions * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22*4f1223e8SApple OSS Distributions * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23*4f1223e8SApple OSS Distributions * Please see the License for the specific language governing rights and
24*4f1223e8SApple OSS Distributions * limitations under the License.
25*4f1223e8SApple OSS Distributions *
26*4f1223e8SApple OSS Distributions * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27*4f1223e8SApple OSS Distributions */
28*4f1223e8SApple OSS Distributions /* Copyright (c) 1995-2018 Apple, Inc. All Rights Reserved */
29*4f1223e8SApple OSS Distributions
30*4f1223e8SApple OSS Distributions #include <sys/cdefs.h>
31*4f1223e8SApple OSS Distributions
32*4f1223e8SApple OSS Distributions #include <kern/assert.h>
33*4f1223e8SApple OSS Distributions #include <kern/ast.h>
34*4f1223e8SApple OSS Distributions #include <kern/clock.h>
35*4f1223e8SApple OSS Distributions #include <kern/cpu_data.h>
36*4f1223e8SApple OSS Distributions #include <kern/kern_types.h>
37*4f1223e8SApple OSS Distributions #include <kern/policy_internal.h>
38*4f1223e8SApple OSS Distributions #include <kern/processor.h>
39*4f1223e8SApple OSS Distributions #include <kern/sched_prim.h> /* for thread_exception_return */
40*4f1223e8SApple OSS Distributions #include <kern/task.h>
41*4f1223e8SApple OSS Distributions #include <kern/thread.h>
42*4f1223e8SApple OSS Distributions #include <kern/thread_group.h>
43*4f1223e8SApple OSS Distributions #include <kern/zalloc.h>
44*4f1223e8SApple OSS Distributions #include <kern/work_interval.h>
45*4f1223e8SApple OSS Distributions #include <mach/kern_return.h>
46*4f1223e8SApple OSS Distributions #include <mach/mach_param.h>
47*4f1223e8SApple OSS Distributions #include <mach/mach_port.h>
48*4f1223e8SApple OSS Distributions #include <mach/mach_types.h>
49*4f1223e8SApple OSS Distributions #include <mach/mach_vm.h>
50*4f1223e8SApple OSS Distributions #include <mach/sync_policy.h>
51*4f1223e8SApple OSS Distributions #include <mach/task.h>
52*4f1223e8SApple OSS Distributions #include <mach/thread_act.h> /* for thread_resume */
53*4f1223e8SApple OSS Distributions #include <mach/thread_policy.h>
54*4f1223e8SApple OSS Distributions #include <mach/thread_status.h>
55*4f1223e8SApple OSS Distributions #include <mach/vm_prot.h>
56*4f1223e8SApple OSS Distributions #include <mach/vm_statistics.h>
57*4f1223e8SApple OSS Distributions #include <machine/atomic.h>
58*4f1223e8SApple OSS Distributions #include <machine/machine_routines.h>
59*4f1223e8SApple OSS Distributions #include <machine/smp.h>
60*4f1223e8SApple OSS Distributions #include <vm/vm_map.h>
61*4f1223e8SApple OSS Distributions #include <vm/vm_protos.h>
62*4f1223e8SApple OSS Distributions
63*4f1223e8SApple OSS Distributions #include <sys/eventvar.h>
64*4f1223e8SApple OSS Distributions #include <sys/kdebug.h>
65*4f1223e8SApple OSS Distributions #include <sys/kernel.h>
66*4f1223e8SApple OSS Distributions #include <sys/lock.h>
67*4f1223e8SApple OSS Distributions #include <sys/param.h>
68*4f1223e8SApple OSS Distributions #include <sys/proc_info.h> /* for fill_procworkqueue */
69*4f1223e8SApple OSS Distributions #include <sys/proc_internal.h>
70*4f1223e8SApple OSS Distributions #include <sys/pthread_shims.h>
71*4f1223e8SApple OSS Distributions #include <sys/resourcevar.h>
72*4f1223e8SApple OSS Distributions #include <sys/signalvar.h>
73*4f1223e8SApple OSS Distributions #include <sys/sysctl.h>
74*4f1223e8SApple OSS Distributions #include <sys/sysproto.h>
75*4f1223e8SApple OSS Distributions #include <sys/systm.h>
76*4f1223e8SApple OSS Distributions #include <sys/ulock.h> /* for ulock_owner_value_to_port_name */
77*4f1223e8SApple OSS Distributions
78*4f1223e8SApple OSS Distributions #include <pthread/bsdthread_private.h>
79*4f1223e8SApple OSS Distributions #include <pthread/workqueue_syscalls.h>
80*4f1223e8SApple OSS Distributions #include <pthread/workqueue_internal.h>
81*4f1223e8SApple OSS Distributions #include <pthread/workqueue_trace.h>
82*4f1223e8SApple OSS Distributions
83*4f1223e8SApple OSS Distributions #include <os/log.h>
84*4f1223e8SApple OSS Distributions
/*
 * Forward declarations for functions defined later in this file.
 * The __dead2 continuations never return: they either run a work item and
 * call thread_exception_return(), or park/terminate the thread.
 */
static void workq_unpark_continue(void *uth, wait_result_t wr) __dead2;

static void workq_bound_thread_unpark_continue(void *uth, wait_result_t wr) __dead2;

static void workq_bound_thread_initialize_and_unpark_continue(void *uth, wait_result_t wr) __dead2;

static void workq_bound_thread_setup_and_run(struct uthread *uth, int setup_flags) __dead2;

static void workq_schedule_creator(proc_t p, struct workqueue *wq,
    workq_kern_threadreq_flags_t flags);

static bool workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req);

static uint32_t workq_constrained_allowance(struct workqueue *wq,
    thread_qos_t at_qos, struct uthread *uth,
    bool may_start_timer, bool record_failed_allowance);

static bool _wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq);

static bool workq_thread_is_busy(uint64_t cur_ts,
    _Atomic uint64_t *lastblocked_tsp);

static int workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS;

static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags);

static inline void
workq_lock_spin(struct workqueue *wq);

static inline void
workq_unlock(struct workqueue *wq);
118*4f1223e8SApple OSS Distributions
119*4f1223e8SApple OSS Distributions #pragma mark globals
120*4f1223e8SApple OSS Distributions
/*
 * A tunable expressed in microseconds with its precomputed mach-absolute-time
 * equivalent, so hot paths never have to convert units.
 */
struct workq_usec_var {
	uint32_t usecs;    /* value as written via sysctl, in microseconds */
	uint64_t abstime;  /* same interval in mach absolute time units */
};

/*
 * Declares one workq_usec_var plus the read/write sysctl that keeps the
 * abstime field in sync (see workq_sysctl_handle_usecs).
 */
#define WORKQ_SYSCTL_USECS(var, init) \
	static struct workq_usec_var var = { .usecs = init }; \
	SYSCTL_OID(_kern, OID_AUTO, var##_usecs, \
	    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &var, 0, \
	    workq_sysctl_handle_usecs, "I", "")
131*4f1223e8SApple OSS Distributions
/* Lock group and refcount group shared by every per-process workqueue. */
static LCK_GRP_DECLARE(workq_lck_grp, "workq");
os_refgrp_decl(static, workq_refgrp, "workq", NULL);

/* Zones for struct workqueue and for thread request objects. */
static ZONE_DEFINE(workq_zone_workqueue, "workq.wq",
    sizeof(struct workqueue), ZC_NONE);
static ZONE_DEFINE(workq_zone_threadreq, "workq.threadreq",
    sizeof(struct workq_threadreq_s), ZC_CACHING);

/* Daemon queue on which deferred workqueue deallocations are processed. */
static struct mpsc_daemon_queue workq_deallocate_queue;

/* Tunables: each pairs a usec value with a cached abstime conversion. */
WORKQ_SYSCTL_USECS(wq_stalled_window, WQ_STALLED_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_reduce_pool_window, WQ_REDUCE_POOL_WINDOW_USECS);
WORKQ_SYSCTL_USECS(wq_max_timer_interval, WQ_MAX_TIMER_INTERVAL_USECS);
static uint32_t wq_max_threads = WORKQUEUE_MAXTHREADS;
static uint32_t wq_max_constrained_threads = WORKQUEUE_MAXTHREADS / 8;
static uint32_t wq_init_constrained_limit = 1;
static uint16_t wq_death_max_load;
static uint32_t wq_max_parallelism[WORKQ_NUM_QOS_BUCKETS];

/*
 * This is not a hard limit but the max size we want to aim to hit across the
 * entire cooperative pool. We can oversubscribe the pool due to non-cooperative
 * workers and the max we will oversubscribe the pool by, is a total of
 * wq_max_cooperative_threads * WORKQ_NUM_QOS_BUCKETS.
 */
static uint32_t wq_max_cooperative_threads;
158*4f1223e8SApple OSS Distributions
159*4f1223e8SApple OSS Distributions static inline uint32_t
wq_cooperative_queue_max_size(struct workqueue * wq)160*4f1223e8SApple OSS Distributions wq_cooperative_queue_max_size(struct workqueue *wq)
161*4f1223e8SApple OSS Distributions {
162*4f1223e8SApple OSS Distributions return wq->wq_cooperative_queue_has_limited_max_size ? 1 : wq_max_cooperative_threads;
163*4f1223e8SApple OSS Distributions }
164*4f1223e8SApple OSS Distributions
165*4f1223e8SApple OSS Distributions #pragma mark sysctls
166*4f1223e8SApple OSS Distributions
167*4f1223e8SApple OSS Distributions static int
168*4f1223e8SApple OSS Distributions workq_sysctl_handle_usecs SYSCTL_HANDLER_ARGS
169*4f1223e8SApple OSS Distributions {
170*4f1223e8SApple OSS Distributions #pragma unused(arg2)
171*4f1223e8SApple OSS Distributions struct workq_usec_var *v = arg1;
172*4f1223e8SApple OSS Distributions int error = sysctl_handle_int(oidp, &v->usecs, 0, req);
173*4f1223e8SApple OSS Distributions if (error || !req->newptr) {
174*4f1223e8SApple OSS Distributions return error;
175*4f1223e8SApple OSS Distributions }
176*4f1223e8SApple OSS Distributions clock_interval_to_absolutetime_interval(v->usecs, NSEC_PER_USEC,
177*4f1223e8SApple OSS Distributions &v->abstime);
178*4f1223e8SApple OSS Distributions return 0;
179*4f1223e8SApple OSS Distributions }
180*4f1223e8SApple OSS Distributions
/* Expose the global thread-pool caps as plain read/write sysctls. */
SYSCTL_INT(_kern, OID_AUTO, wq_max_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_threads, 0, "");

SYSCTL_INT(_kern, OID_AUTO, wq_max_constrained_threads, CTLFLAG_RW | CTLFLAG_LOCKED,
    &wq_max_constrained_threads, 0, "");
186*4f1223e8SApple OSS Distributions
187*4f1223e8SApple OSS Distributions static int
188*4f1223e8SApple OSS Distributions wq_limit_cooperative_threads_for_proc SYSCTL_HANDLER_ARGS
189*4f1223e8SApple OSS Distributions {
190*4f1223e8SApple OSS Distributions #pragma unused(arg1, arg2, oidp)
191*4f1223e8SApple OSS Distributions int input_pool_size = 0;
192*4f1223e8SApple OSS Distributions int changed;
193*4f1223e8SApple OSS Distributions int error = 0;
194*4f1223e8SApple OSS Distributions
195*4f1223e8SApple OSS Distributions error = sysctl_io_number(req, 0, sizeof(int), &input_pool_size, &changed);
196*4f1223e8SApple OSS Distributions if (error || !changed) {
197*4f1223e8SApple OSS Distributions return error;
198*4f1223e8SApple OSS Distributions }
199*4f1223e8SApple OSS Distributions
200*4f1223e8SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_DEFAULT 0
201*4f1223e8SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS -1
202*4f1223e8SApple OSS Distributions /* Not available currently, but sysctl interface is designed to allow these
203*4f1223e8SApple OSS Distributions * extra parameters:
204*4f1223e8SApple OSS Distributions * WQ_COOPERATIVE_POOL_SIZE_STRICT : -2 (across all bucket)
205*4f1223e8SApple OSS Distributions * WQ_COOPERATIVE_POOL_SIZE_CUSTOM : [1, 512]
206*4f1223e8SApple OSS Distributions */
207*4f1223e8SApple OSS Distributions
208*4f1223e8SApple OSS Distributions if (input_pool_size != WQ_COOPERATIVE_POOL_SIZE_DEFAULT
209*4f1223e8SApple OSS Distributions && input_pool_size != WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS) {
210*4f1223e8SApple OSS Distributions error = EINVAL;
211*4f1223e8SApple OSS Distributions goto out;
212*4f1223e8SApple OSS Distributions }
213*4f1223e8SApple OSS Distributions
214*4f1223e8SApple OSS Distributions proc_t p = req->p;
215*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
216*4f1223e8SApple OSS Distributions
217*4f1223e8SApple OSS Distributions if (wq != NULL) {
218*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
219*4f1223e8SApple OSS Distributions if (wq->wq_reqcount > 0 || wq->wq_nthreads > 0) {
220*4f1223e8SApple OSS Distributions // Hackily enforce that the workqueue is still new (no requests or
221*4f1223e8SApple OSS Distributions // threads)
222*4f1223e8SApple OSS Distributions error = ENOTSUP;
223*4f1223e8SApple OSS Distributions } else {
224*4f1223e8SApple OSS Distributions wq->wq_cooperative_queue_has_limited_max_size = (input_pool_size == WQ_COOPERATIVE_POOL_SIZE_STRICT_PER_QOS);
225*4f1223e8SApple OSS Distributions }
226*4f1223e8SApple OSS Distributions workq_unlock(wq);
227*4f1223e8SApple OSS Distributions } else {
228*4f1223e8SApple OSS Distributions /* This process has no workqueue, calling this syctl makes no sense */
229*4f1223e8SApple OSS Distributions return ENOTSUP;
230*4f1223e8SApple OSS Distributions }
231*4f1223e8SApple OSS Distributions
232*4f1223e8SApple OSS Distributions out:
233*4f1223e8SApple OSS Distributions return error;
234*4f1223e8SApple OSS Distributions }
235*4f1223e8SApple OSS Distributions
SYSCTL_PROC(_kern, OID_AUTO, wq_limit_cooperative_threads,
    CTLFLAG_ANYBODY | CTLFLAG_MASKED | CTLFLAG_WR | CTLFLAG_LOCKED | CTLTYPE_INT, 0, 0,
    wq_limit_cooperative_threads_for_proc,
    "I", "Modify the max pool size of the cooperative pool");

#pragma mark p_wqptr

/*
 * Sentinel stored in p->p_wqptr while a thread is initializing the
 * workqueue; concurrent callers wait on &p->p_wqptr until it is replaced.
 */
#define WQPTR_IS_INITING_VALUE ((struct workqueue *)~(uintptr_t)0)
244*4f1223e8SApple OSS Distributions
245*4f1223e8SApple OSS Distributions static struct workqueue *
proc_get_wqptr_fast(struct proc * p)246*4f1223e8SApple OSS Distributions proc_get_wqptr_fast(struct proc *p)
247*4f1223e8SApple OSS Distributions {
248*4f1223e8SApple OSS Distributions return os_atomic_load(&p->p_wqptr, relaxed);
249*4f1223e8SApple OSS Distributions }
250*4f1223e8SApple OSS Distributions
251*4f1223e8SApple OSS Distributions struct workqueue *
proc_get_wqptr(struct proc * p)252*4f1223e8SApple OSS Distributions proc_get_wqptr(struct proc *p)
253*4f1223e8SApple OSS Distributions {
254*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
255*4f1223e8SApple OSS Distributions return wq == WQPTR_IS_INITING_VALUE ? NULL : wq;
256*4f1223e8SApple OSS Distributions }
257*4f1223e8SApple OSS Distributions
258*4f1223e8SApple OSS Distributions static void
proc_set_wqptr(struct proc * p,struct workqueue * wq)259*4f1223e8SApple OSS Distributions proc_set_wqptr(struct proc *p, struct workqueue *wq)
260*4f1223e8SApple OSS Distributions {
261*4f1223e8SApple OSS Distributions wq = os_atomic_xchg(&p->p_wqptr, wq, release);
262*4f1223e8SApple OSS Distributions if (wq == WQPTR_IS_INITING_VALUE) {
263*4f1223e8SApple OSS Distributions proc_lock(p);
264*4f1223e8SApple OSS Distributions thread_wakeup(&p->p_wqptr);
265*4f1223e8SApple OSS Distributions proc_unlock(p);
266*4f1223e8SApple OSS Distributions }
267*4f1223e8SApple OSS Distributions }
268*4f1223e8SApple OSS Distributions
/*
 * Claim the right to initialize the process workqueue, or wait for a
 * concurrent initializer.
 *
 * Returns true when the caller won the race (p_wqptr was NULL and is now the
 * initing sentinel); the caller must finish by calling proc_set_wqptr().
 * Returns false when a workqueue already exists, or after blocking until a
 * concurrent initialization settled — the caller should re-check.
 */
static bool
proc_init_wqptr_or_wait(struct proc *p)
{
	struct workqueue *wq;

	proc_lock(p);
	wq = os_atomic_load(&p->p_wqptr, relaxed);

	if (wq == NULL) {
		/* We won: mark initialization in progress before dropping the lock. */
		os_atomic_store(&p->p_wqptr, WQPTR_IS_INITING_VALUE, relaxed);
		proc_unlock(p);
		return true;
	}

	if (wq == WQPTR_IS_INITING_VALUE) {
		/* Someone else is initializing; sleep until proc_set_wqptr() wakes us. */
		assert_wait(&p->p_wqptr, THREAD_UNINT);
		proc_unlock(p);
		thread_block(THREAD_CONTINUE_NULL);
	} else {
		proc_unlock(p);
	}
	return false;
}
292*4f1223e8SApple OSS Distributions
293*4f1223e8SApple OSS Distributions static inline event_t
workq_parked_wait_event(struct uthread * uth)294*4f1223e8SApple OSS Distributions workq_parked_wait_event(struct uthread *uth)
295*4f1223e8SApple OSS Distributions {
296*4f1223e8SApple OSS Distributions return (event_t)&uth->uu_workq_stackaddr;
297*4f1223e8SApple OSS Distributions }
298*4f1223e8SApple OSS Distributions
299*4f1223e8SApple OSS Distributions static inline void
workq_thread_wakeup(struct uthread * uth)300*4f1223e8SApple OSS Distributions workq_thread_wakeup(struct uthread *uth)
301*4f1223e8SApple OSS Distributions {
302*4f1223e8SApple OSS Distributions thread_wakeup_thread(workq_parked_wait_event(uth), get_machthread(uth));
303*4f1223e8SApple OSS Distributions }
304*4f1223e8SApple OSS Distributions
305*4f1223e8SApple OSS Distributions #pragma mark wq_thactive
306*4f1223e8SApple OSS Distributions
#if defined(__LP64__)
// Layout is:
// 127 - 115 : 13 bits of zeroes
// 114 - 112 : best QoS among all pending constrained requests
// 111 - 0 : MGR, AUI, UI, IN, DF, UT, BG+MT buckets every 16 bits
#define WQ_THACTIVE_BUCKET_WIDTH 16
#define WQ_THACTIVE_QOS_SHIFT (7 * WQ_THACTIVE_BUCKET_WIDTH)
#else
// Layout is:
// 63 - 61 : best QoS among all pending constrained requests
// 60 : Manager bucket (0 or 1)
// 59 - 0 : AUI, UI, IN, DF, UT, BG+MT buckets every 10 bits
#define WQ_THACTIVE_BUCKET_WIDTH 10
#define WQ_THACTIVE_QOS_SHIFT (6 * WQ_THACTIVE_BUCKET_WIDTH + 1)
#endif
/* Mask/half-range helpers for one per-QoS bucket of the packed counter. */
#define WQ_THACTIVE_BUCKET_MASK ((1U << WQ_THACTIVE_BUCKET_WIDTH) - 1)
#define WQ_THACTIVE_BUCKET_HALF (1U << (WQ_THACTIVE_BUCKET_WIDTH - 1))

static_assert(sizeof(wq_thactive_t) * CHAR_BIT - WQ_THACTIVE_QOS_SHIFT >= 3,
    "Make sure we have space to encode a QoS");
327*4f1223e8SApple OSS Distributions
328*4f1223e8SApple OSS Distributions static inline wq_thactive_t
_wq_thactive(struct workqueue * wq)329*4f1223e8SApple OSS Distributions _wq_thactive(struct workqueue *wq)
330*4f1223e8SApple OSS Distributions {
331*4f1223e8SApple OSS Distributions return os_atomic_load_wide(&wq->wq_thactive, relaxed);
332*4f1223e8SApple OSS Distributions }
333*4f1223e8SApple OSS Distributions
334*4f1223e8SApple OSS Distributions static inline uint8_t
_wq_bucket(thread_qos_t qos)335*4f1223e8SApple OSS Distributions _wq_bucket(thread_qos_t qos)
336*4f1223e8SApple OSS Distributions {
337*4f1223e8SApple OSS Distributions // Map both BG and MT to the same bucket by over-shifting down and
338*4f1223e8SApple OSS Distributions // clamping MT and BG together.
339*4f1223e8SApple OSS Distributions switch (qos) {
340*4f1223e8SApple OSS Distributions case THREAD_QOS_MAINTENANCE:
341*4f1223e8SApple OSS Distributions return 0;
342*4f1223e8SApple OSS Distributions default:
343*4f1223e8SApple OSS Distributions return qos - 2;
344*4f1223e8SApple OSS Distributions }
345*4f1223e8SApple OSS Distributions }
346*4f1223e8SApple OSS Distributions
/* Extract the best pending-constrained-request QoS from the packed counter. */
#define WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(tha) \
	((thread_qos_t)((tha) >> WQ_THACTIVE_QOS_SHIFT))

/*
 * Read the best constrained-request QoS without an atomic wide load.
 * NOTE(review): safe only because those bits are always written with the
 * workqueue lock held, per the comment below.
 */
static inline thread_qos_t
_wq_thactive_best_constrained_req_qos(struct workqueue *wq)
{
	// Avoid expensive atomic operations: the three bits we're loading are in
	// a single byte, and always updated under the workqueue lock
	wq_thactive_t v = *(wq_thactive_t *)&wq->wq_thactive;
	return WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(v);
}
358*4f1223e8SApple OSS Distributions
/*
 * Re-derive the "best constrained request QoS" bits of wq_thactive from the
 * head of the constrained priority queue, applying the change as a signed
 * delta-add so concurrent bucket updates are not clobbered.
 */
static void
_wq_thactive_refresh_best_constrained_req_qos(struct workqueue *wq)
{
	thread_qos_t old_qos, new_qos;
	workq_threadreq_t req;

	/* Highest-priority pending constrained request, if any. */
	req = priority_queue_max(&wq->wq_constrained_queue,
	    struct workq_threadreq_s, tr_entry);
	new_qos = req ? req->tr_qos : THREAD_QOS_UNSPECIFIED;
	old_qos = _wq_thactive_best_constrained_req_qos(wq);
	if (old_qos != new_qos) {
		/* Signed difference, shifted into the QoS field of the packed word. */
		long delta = (long)new_qos - (long)old_qos;
		wq_thactive_t v = (wq_thactive_t)delta << WQ_THACTIVE_QOS_SHIFT;
		/*
		 * We can do an atomic add relative to the initial load because updates
		 * to this qos are always serialized under the workqueue lock.
		 */
		v = os_atomic_add(&wq->wq_thactive, v, relaxed);
#ifdef __LP64__
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, (uint64_t)v,
		    (uint64_t)(v >> 64), 0);
#else
		WQ_TRACE_WQ(TRACE_wq_thactive_update, wq, v, 0, 0);
#endif
	}
}
385*4f1223e8SApple OSS Distributions
386*4f1223e8SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_offset_for_qos(thread_qos_t qos)387*4f1223e8SApple OSS Distributions _wq_thactive_offset_for_qos(thread_qos_t qos)
388*4f1223e8SApple OSS Distributions {
389*4f1223e8SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
390*4f1223e8SApple OSS Distributions __builtin_assume(bucket < WORKQ_NUM_BUCKETS);
391*4f1223e8SApple OSS Distributions return (wq_thactive_t)1 << (bucket * WQ_THACTIVE_BUCKET_WIDTH);
392*4f1223e8SApple OSS Distributions }
393*4f1223e8SApple OSS Distributions
394*4f1223e8SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_inc(struct workqueue * wq,thread_qos_t qos)395*4f1223e8SApple OSS Distributions _wq_thactive_inc(struct workqueue *wq, thread_qos_t qos)
396*4f1223e8SApple OSS Distributions {
397*4f1223e8SApple OSS Distributions wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
398*4f1223e8SApple OSS Distributions return os_atomic_add_orig(&wq->wq_thactive, v, relaxed);
399*4f1223e8SApple OSS Distributions }
400*4f1223e8SApple OSS Distributions
401*4f1223e8SApple OSS Distributions static inline wq_thactive_t
_wq_thactive_dec(struct workqueue * wq,thread_qos_t qos)402*4f1223e8SApple OSS Distributions _wq_thactive_dec(struct workqueue *wq, thread_qos_t qos)
403*4f1223e8SApple OSS Distributions {
404*4f1223e8SApple OSS Distributions wq_thactive_t v = _wq_thactive_offset_for_qos(qos);
405*4f1223e8SApple OSS Distributions return os_atomic_sub_orig(&wq->wq_thactive, v, relaxed);
406*4f1223e8SApple OSS Distributions }
407*4f1223e8SApple OSS Distributions
/*
 * Move one active thread from old_qos's bucket to new_qos's bucket: a single
 * atomic add of the (possibly negative) bucket difference keeps the packed
 * word consistent, while the plain wq_thscheduled_count updates are
 * presumably protected by the workqueue lock — as with the other
 * non-atomic accessors in this file.
 */
static inline void
_wq_thactive_move(struct workqueue *wq,
    thread_qos_t old_qos, thread_qos_t new_qos)
{
	wq_thactive_t v = _wq_thactive_offset_for_qos(new_qos) -
	    _wq_thactive_offset_for_qos(old_qos);
	os_atomic_add(&wq->wq_thactive, v, relaxed);
	wq->wq_thscheduled_count[_wq_bucket(old_qos)]--;
	wq->wq_thscheduled_count[_wq_bucket(new_qos)]++;
}
418*4f1223e8SApple OSS Distributions
/*
 * Sum the active-thread counts for all buckets at or above `qos` out of the
 * packed snapshot `v`.
 *
 * If busycount is non-NULL, also count buckets that have scheduled-but-idle
 * threads whose most recent block was "recent" (workq_thread_is_busy) — an
 * approximation used to damp thread-creation storms.  max_busycount, when
 * requested, is the number of buckets inspected (upper bound on *busycount).
 * Returns the aggregated active count.
 */
static inline uint32_t
_wq_thactive_aggregate_downto_qos(struct workqueue *wq, wq_thactive_t v,
    thread_qos_t qos, uint32_t *busycount, uint32_t *max_busycount)
{
	uint32_t count = 0, active;
	uint64_t curtime;

	assert(WORKQ_THREAD_QOS_MIN <= qos && qos <= WORKQ_THREAD_QOS_MAX);

	if (busycount) {
		/* Sample time once; compared against per-bucket last-blocked stamps. */
		curtime = mach_absolute_time();
		*busycount = 0;
	}
	if (max_busycount) {
		*max_busycount = THREAD_QOS_LAST - qos;
	}

	/* Shift the snapshot so bucket i sits in the low-order bits each pass. */
	uint8_t i = _wq_bucket(qos);
	v >>= i * WQ_THACTIVE_BUCKET_WIDTH;
	for (; i < WORKQ_NUM_QOS_BUCKETS; i++, v >>= WQ_THACTIVE_BUCKET_WIDTH) {
		active = v & WQ_THACTIVE_BUCKET_MASK;
		count += active;

		if (busycount && wq->wq_thscheduled_count[i] > active) {
			if (workq_thread_is_busy(curtime, &wq->wq_lastblocked_ts[i])) {
				/*
				 * We only consider the last blocked thread for a given bucket
				 * as busy because we don't want to take the list lock in each
				 * sched callback. However this is an approximation that could
				 * contribute to thread creation storms.
				 */
				(*busycount)++;
			}
		}
	}

	return count;
}
457*4f1223e8SApple OSS Distributions
458*4f1223e8SApple OSS Distributions /* The input qos here should be the requested QoS of the thread, not accounting
459*4f1223e8SApple OSS Distributions * for any overrides */
460*4f1223e8SApple OSS Distributions static inline void
_wq_cooperative_queue_scheduled_count_dec(struct workqueue * wq,thread_qos_t qos)461*4f1223e8SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(struct workqueue *wq, thread_qos_t qos)
462*4f1223e8SApple OSS Distributions {
463*4f1223e8SApple OSS Distributions __assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]--;
464*4f1223e8SApple OSS Distributions assert(old_scheduled_count > 0);
465*4f1223e8SApple OSS Distributions }
466*4f1223e8SApple OSS Distributions
467*4f1223e8SApple OSS Distributions /* The input qos here should be the requested QoS of the thread, not accounting
468*4f1223e8SApple OSS Distributions * for any overrides */
469*4f1223e8SApple OSS Distributions static inline void
_wq_cooperative_queue_scheduled_count_inc(struct workqueue * wq,thread_qos_t qos)470*4f1223e8SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(struct workqueue *wq, thread_qos_t qos)
471*4f1223e8SApple OSS Distributions {
472*4f1223e8SApple OSS Distributions __assert_only uint8_t old_scheduled_count = wq->wq_cooperative_queue_scheduled_count[_wq_bucket(qos)]++;
473*4f1223e8SApple OSS Distributions assert(old_scheduled_count < UINT8_MAX);
474*4f1223e8SApple OSS Distributions }
475*4f1223e8SApple OSS Distributions
476*4f1223e8SApple OSS Distributions #pragma mark wq_flags
477*4f1223e8SApple OSS Distributions
/* Atomically load the workqueue's flag word (relaxed: no ordering implied). */
static inline uint32_t
_wq_flags(struct workqueue *wq)
{
	return os_atomic_load(&wq->wq_flags, relaxed);
}
483*4f1223e8SApple OSS Distributions
/* Whether the workqueue is being torn down (WQ_EXITING set in wq_flags). */
static inline bool
_wq_exiting(struct workqueue *wq)
{
	return _wq_flags(wq) & WQ_EXITING;
}
489*4f1223e8SApple OSS Distributions
490*4f1223e8SApple OSS Distributions bool
workq_is_exiting(struct proc * p)491*4f1223e8SApple OSS Distributions workq_is_exiting(struct proc *p)
492*4f1223e8SApple OSS Distributions {
493*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
494*4f1223e8SApple OSS Distributions return !wq || _wq_exiting(wq);
495*4f1223e8SApple OSS Distributions }
496*4f1223e8SApple OSS Distributions
497*4f1223e8SApple OSS Distributions
498*4f1223e8SApple OSS Distributions #pragma mark workqueue lock
499*4f1223e8SApple OSS Distributions
/*
 * Whether the workqueue lock is held, probed through the kdp (kernel
 * debugger) variant of the ticket-lock API — presumably for use from
 * debugger/stackshot context where taking locks is not allowed.
 */
static bool
workq_lock_is_acquired_kdp(struct workqueue *wq)
{
	return kdp_lck_ticket_is_acquired(&wq->wq_lock);
}
505*4f1223e8SApple OSS Distributions
/* Acquire the workqueue's ticket lock (blocks until owned). */
static inline void
workq_lock_spin(struct workqueue *wq)
{
	lck_ticket_lock(&wq->wq_lock, &workq_lck_grp);
}
511*4f1223e8SApple OSS Distributions
/* Assert (debug builds) that the workqueue lock is owned by the caller. */
static inline void
workq_lock_held(struct workqueue *wq)
{
	LCK_TICKET_ASSERT_OWNED(&wq->wq_lock);
}
517*4f1223e8SApple OSS Distributions
/* Try to acquire the workqueue lock without blocking; true on success. */
static inline bool
workq_lock_try(struct workqueue *wq)
{
	return lck_ticket_lock_try(&wq->wq_lock, &workq_lck_grp);
}
523*4f1223e8SApple OSS Distributions
/* Release the workqueue's ticket lock. */
static inline void
workq_unlock(struct workqueue *wq)
{
	lck_ticket_unlock(&wq->wq_lock);
}
529*4f1223e8SApple OSS Distributions
530*4f1223e8SApple OSS Distributions #pragma mark idle thread lists
531*4f1223e8SApple OSS Distributions
/* Initializer for a uu_workq_policy whose requested and bucket QoS both
 * start at `qos` (max and override QoS left at zero). */
#define WORKQ_POLICY_INIT(qos) \
	(struct uu_workq_policy){ .qos_req = qos, .qos_bucket = qos }
534*4f1223e8SApple OSS Distributions
535*4f1223e8SApple OSS Distributions static inline thread_qos_t
workq_pri_bucket(struct uu_workq_policy req)536*4f1223e8SApple OSS Distributions workq_pri_bucket(struct uu_workq_policy req)
537*4f1223e8SApple OSS Distributions {
538*4f1223e8SApple OSS Distributions return MAX(MAX(req.qos_req, req.qos_max), req.qos_override);
539*4f1223e8SApple OSS Distributions }
540*4f1223e8SApple OSS Distributions
541*4f1223e8SApple OSS Distributions static inline thread_qos_t
workq_pri_override(struct uu_workq_policy req)542*4f1223e8SApple OSS Distributions workq_pri_override(struct uu_workq_policy req)
543*4f1223e8SApple OSS Distributions {
544*4f1223e8SApple OSS Distributions return MAX(workq_pri_bucket(req), req.qos_bucket);
545*4f1223e8SApple OSS Distributions }
546*4f1223e8SApple OSS Distributions
/*
 * Whether the workloop scheduling parameters (explicit priority / policy)
 * carried by `req` differ from the ones `uth` last parked with, in which
 * case the thread's scheduling parameters must be re-applied before it
 * services the request.
 */
static inline bool
workq_thread_needs_params_change(workq_threadreq_t req, struct uthread *uth)
{
	workq_threadreq_param_t cur_trp, req_trp = { };

	/* Params the thread stashed the last time it parked. */
	cur_trp.trp_value = uth->uu_save.uus_workq_park_data.workloop_params;
	if (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
		req_trp = kqueue_threadreq_workloop_param(req);
	}

	/*
	 * CPU percent flags are handled separately to policy changes, so ignore
	 * them for all of these checks.
	 */
	uint16_t cur_flags = (cur_trp.trp_flags & ~TRP_CPUPERCENT);
	uint16_t req_flags = (req_trp.trp_flags & ~TRP_CPUPERCENT);

	/* Neither side carries custom params: nothing to change. */
	if (!req_flags && !cur_flags) {
		return false;
	}

	/* A different set of params is in effect. */
	if (req_flags != cur_flags) {
		return true;
	}

	/* Same flags set on both sides: compare the actual values. */
	if ((req_flags & TRP_PRIORITY) && req_trp.trp_pri != cur_trp.trp_pri) {
		return true;
	}

	if ((req_flags & TRP_POLICY) && req_trp.trp_pol != cur_trp.trp_pol) {
		return true;
	}

	return false;
}
582*4f1223e8SApple OSS Distributions
/*
 * Whether `uth` needs any scheduling change (params, QoS, or preadoption
 * thread group) before it can service `req`.
 */
static inline bool
workq_thread_needs_priority_change(workq_threadreq_t req, struct uthread *uth)
{
	/* Explicit workloop priority/policy params changed? */
	if (workq_thread_needs_params_change(req, uth)) {
		return true;
	}

	/* Requested QoS differs from the thread's current effective QoS? */
	if (req->tr_qos != workq_pri_override(uth->uu_workq_pri)) {
		return true;
	}

#if CONFIG_PREADOPT_TG
	thread_group_qos_t tg = kqr_preadopt_thread_group(req);
	if (KQWL_HAS_VALID_PREADOPTED_TG(tg)) {
		/*
		 * Ideally, we'd add check here to see if thread's preadopt TG is same
		 * as the thread requests's thread group and short circuit if that is
		 * the case. But in the interest of keeping the code clean and not
		 * taking the thread lock here, we're going to skip this. We will
		 * eventually shortcircuit once we try to set the preadoption thread
		 * group on the thread.
		 */
		return true;
	}
#endif

	return false;
}
611*4f1223e8SApple OSS Distributions
/* Input thread must be self. Called during self override, resetting overrides
 * or while processing kevents
 *
 * Called with workq lock held. Sometimes also the thread mutex
 *
 * Moves the calling thread between QoS buckets when its effective policy
 * changes, updates its kernel workq override, and redrives thread creation
 * if lowering the bucket may have unblocked a pending thread request.
 */
static void
workq_thread_update_bucket(proc_t p, struct workqueue *wq, struct uthread *uth,
    struct uu_workq_policy old_pri, struct uu_workq_policy new_pri,
    bool force_run)
{
	assert(uth == current_uthread());

	thread_qos_t old_bucket = old_pri.qos_bucket;
	thread_qos_t new_bucket = workq_pri_bucket(new_pri);

	/* Permanently bound threads are not tracked in the active counts. */
	if ((old_bucket != new_bucket) &&
	    !workq_thread_is_permanently_bound(uth)) {
		_wq_thactive_move(wq, old_bucket, new_bucket);
	}

	new_pri.qos_bucket = new_bucket;
	uth->uu_workq_pri = new_pri;

	/* Only touch the scheduler when the override component changed. */
	if (old_pri.qos_override != new_pri.qos_override) {
		thread_set_workq_override(get_machthread(uth), new_pri.qos_override);
	}

	if (wq->wq_reqcount &&
	    !workq_thread_is_permanently_bound(uth) &&
	    (old_bucket > new_bucket || force_run)) {
		int flags = WORKQ_THREADREQ_CAN_CREATE_THREADS;
		if (old_bucket > new_bucket) {
			/*
			 * When lowering our bucket, we may unblock a thread request,
			 * but we can't drop our priority before we have evaluated
			 * whether this is the case, and if we ever drop the workqueue lock
			 * that would cause a priority inversion.
			 *
			 * We hence have to disallow thread creation in that case.
			 */
			flags = 0;
		}
		workq_schedule_creator(p, wq, flags);
	}
}
657*4f1223e8SApple OSS Distributions
/*
 * Sets/resets the cpu percent limits on the current thread. We can't set
 * these limits from outside of the current thread, so this function needs
 * to be called when we're executing on the intended thread.
 *
 * Passing req == NULL (or a req without WL_PARAMS) simply clears any
 * existing limit.
 */
static void
workq_thread_reset_cpupercent(workq_threadreq_t req, struct uthread *uth)
{
	assert(uth == current_uthread());
	workq_threadreq_param_t trp = { };

	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	if (uth->uu_workq_flags & UT_WORKQ_CPUPERCENT) {
		/*
		 * Going through disable when we have an existing CPU percent limit
		 * set will force the ledger to refill the token bucket of the current
		 * thread. Removing any penalty applied by previous thread use.
		 */
		thread_set_cpulimit(THREAD_CPULIMIT_DISABLE, 0, 0);
		uth->uu_workq_flags &= ~UT_WORKQ_CPUPERCENT;
	}

	/* Apply the new limit, if the request carries one. */
	if (trp.trp_flags & TRP_CPUPERCENT) {
		thread_set_cpulimit(THREAD_CPULIMIT_BLOCK, trp.trp_cpupercent,
		    (uint64_t)trp.trp_refillms * NSEC_PER_SEC);
		uth->uu_workq_flags |= UT_WORKQ_CPUPERCENT;
	}
}
689*4f1223e8SApple OSS Distributions
/*
 * This function is always called with the workq lock, except for the
 * permanently bound workqueue thread, which instead requires the kqlock.
 * See locking model for bound thread's uu_workq_flags.
 *
 * Resets `uth`'s scheduling state (QoS, priority, policy, and — when
 * CONFIG_PREADOPT_TG — its preadopted thread group) to match thread
 * request `req`.  A NULL `req` resets the thread for cleanup
 * (WORKQ_THREAD_QOS_CLEANUP).
 */
static void
workq_thread_reset_pri(struct workqueue *wq, struct uthread *uth,
    workq_threadreq_t req, bool unpark)
{
	thread_t th = get_machthread(uth);
	thread_qos_t qos = req ? req->tr_qos : WORKQ_THREAD_QOS_CLEANUP;
	workq_threadreq_param_t trp = { };
	int priority = 31;
	int policy = POLICY_TIMESHARE;

	if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS)) {
		trp = kqueue_threadreq_workloop_param(req);
	}

	uth->uu_workq_pri = WORKQ_POLICY_INIT(qos);
	uth->uu_workq_flags &= ~UT_WORKQ_OUTSIDE_QOS;

	if (unpark) {
		/* Stash the params so workq_thread_needs_params_change can compare. */
		uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
		// qos sent out to userspace (may differ from uu_workq_pri on param threads)
		uth->uu_save.uus_workq_park_data.qos = qos;
	}

	if (qos == WORKQ_THREAD_QOS_MANAGER) {
		uint32_t mgr_pri = wq->wq_event_manager_priority;
		assert(trp.trp_value == 0); // manager qos and thread policy don't mix

		/* Manager priority may encode a raw sched pri instead of a QoS. */
		if (_pthread_priority_has_sched_pri(mgr_pri)) {
			mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
			thread_set_workq_pri(th, THREAD_QOS_UNSPECIFIED, mgr_pri,
			    POLICY_TIMESHARE);
			return;
		}

		qos = _pthread_priority_thread_qos(mgr_pri);
	} else {
		if (trp.trp_flags & TRP_PRIORITY) {
			/* Explicit priority overrides QoS-based scheduling entirely. */
			qos = THREAD_QOS_UNSPECIFIED;
			priority = trp.trp_pri;
			uth->uu_workq_flags |= UT_WORKQ_OUTSIDE_QOS;
		}

		if (trp.trp_flags & TRP_POLICY) {
			policy = trp.trp_pol;
		}
	}

#if CONFIG_PREADOPT_TG
	if (req && (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP)) {
		/*
		 * For kqwl permanently configured with a thread group, we can safely borrow
		 * +1 ref from kqwl_preadopt_tg. A thread then takes additional +1 ref
		 * for itself via thread_set_preadopt_thread_group.
		 *
		 * In all other cases, we cannot safely read and borrow the reference from the kqwl
		 * since it can disappear from under us at any time due to the max-ing logic in
		 * kqueue_set_preadopted_thread_group.
		 *
		 * As such, we do the following dance:
		 *
		 * 1) cmpxchng and steal the kqwl's preadopt thread group and leave
		 * behind with (NULL + QoS). At this point, we have the reference
		 * to the thread group from the kqwl.
		 * 2) Have the thread set the preadoption thread group on itself.
		 * 3) cmpxchng from (NULL + QoS) which we set earlier in (1), back to
		 * thread_group + QoS. ie we try to give the reference back to the kqwl.
		 * If we fail, that's because a higher QoS thread group was set on the
		 * kqwl in kqueue_set_preadopted_thread_group in which case, we need to
		 * go back to (1).
		 */

		_Atomic(struct thread_group *) * tg_loc = kqr_preadopt_thread_group_addr(req);

		thread_group_qos_t old_tg, new_tg;
		int ret = 0;
again:
		/* Step (1): steal the TG pointer, leaving only the QoS bits behind. */
		ret = os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
			if ((!KQWL_HAS_VALID_PREADOPTED_TG(old_tg)) ||
			KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
			        os_atomic_rmw_loop_give_up(break);
			}

			/*
			 * Leave the QoS behind - kqueue_set_preadopted_thread_group will
			 * only modify it if there is a higher QoS thread group to attach
			 */
			new_tg = (thread_group_qos_t) ((uintptr_t) old_tg & KQWL_PREADOPT_TG_QOS_MASK);
		});

		if (ret) {
			/*
			 * We successfully took the ref from the kqwl so set it on the
			 * thread now
			 */
			thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));

			thread_group_qos_t thread_group_to_expect = new_tg;
			thread_group_qos_t thread_group_to_set = old_tg;

			/* Step (3): try to hand the reference back to the kqwl. */
			os_atomic_rmw_loop(tg_loc, old_tg, new_tg, relaxed, {
				if (old_tg != thread_group_to_expect) {
				        /*
				         * There was an intervening write to the kqwl_preadopt_tg,
				         * and it has a higher QoS than what we are working with
				         * here. Abandon our current adopted thread group and redo
				         * the full dance
				         */
				        thread_group_deallocate_safe(KQWL_GET_PREADOPTED_TG(thread_group_to_set));
				        os_atomic_rmw_loop_give_up(goto again);
				}

				new_tg = thread_group_to_set;
			});
		} else {
			if (KQWL_HAS_PERMANENT_PREADOPTED_TG(old_tg)) {
				thread_set_preadopt_thread_group(th, KQWL_GET_PREADOPTED_TG(old_tg));
			} else {
				/* Nothing valid on the kqwl, just clear what's on the thread */
				thread_set_preadopt_thread_group(th, NULL);
			}
		}
	} else {
		/* Not even a kqwl, clear what's on the thread */
		thread_set_preadopt_thread_group(th, NULL);
	}
#endif
	thread_set_workq_pri(th, qos, priority, policy);
}
823*4f1223e8SApple OSS Distributions
824*4f1223e8SApple OSS Distributions /*
825*4f1223e8SApple OSS Distributions * Called by kevent with the NOTE_WL_THREAD_REQUEST knote lock held,
826*4f1223e8SApple OSS Distributions * every time a servicer is being told about a new max QoS.
827*4f1223e8SApple OSS Distributions */
828*4f1223e8SApple OSS Distributions void
workq_thread_set_max_qos(struct proc * p,workq_threadreq_t kqr)829*4f1223e8SApple OSS Distributions workq_thread_set_max_qos(struct proc *p, workq_threadreq_t kqr)
830*4f1223e8SApple OSS Distributions {
831*4f1223e8SApple OSS Distributions struct uu_workq_policy old_pri, new_pri;
832*4f1223e8SApple OSS Distributions struct uthread *uth = current_uthread();
833*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
834*4f1223e8SApple OSS Distributions thread_qos_t qos = kqr->tr_kq_qos_index;
835*4f1223e8SApple OSS Distributions
836*4f1223e8SApple OSS Distributions if (uth->uu_workq_pri.qos_max == qos) {
837*4f1223e8SApple OSS Distributions return;
838*4f1223e8SApple OSS Distributions }
839*4f1223e8SApple OSS Distributions
840*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
841*4f1223e8SApple OSS Distributions old_pri = new_pri = uth->uu_workq_pri;
842*4f1223e8SApple OSS Distributions new_pri.qos_max = qos;
843*4f1223e8SApple OSS Distributions workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
844*4f1223e8SApple OSS Distributions workq_unlock(wq);
845*4f1223e8SApple OSS Distributions }
846*4f1223e8SApple OSS Distributions
847*4f1223e8SApple OSS Distributions #pragma mark idle threads accounting and handling
848*4f1223e8SApple OSS Distributions
849*4f1223e8SApple OSS Distributions static inline struct uthread *
workq_oldest_killable_idle_thread(struct workqueue * wq)850*4f1223e8SApple OSS Distributions workq_oldest_killable_idle_thread(struct workqueue *wq)
851*4f1223e8SApple OSS Distributions {
852*4f1223e8SApple OSS Distributions struct uthread *uth = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);
853*4f1223e8SApple OSS Distributions
854*4f1223e8SApple OSS Distributions if (uth && !uth->uu_save.uus_workq_park_data.has_stack) {
855*4f1223e8SApple OSS Distributions uth = TAILQ_PREV(uth, workq_uthread_head, uu_workq_entry);
856*4f1223e8SApple OSS Distributions if (uth) {
857*4f1223e8SApple OSS Distributions assert(uth->uu_save.uus_workq_park_data.has_stack);
858*4f1223e8SApple OSS Distributions }
859*4f1223e8SApple OSS Distributions }
860*4f1223e8SApple OSS Distributions return uth;
861*4f1223e8SApple OSS Distributions }
862*4f1223e8SApple OSS Distributions
/*
 * How long an idle thread must stay idle before it becomes killable,
 * shrinking as the idle pool grows.
 */
static inline uint64_t
workq_kill_delay_for_idle_thread(struct workqueue *wq)
{
	uint64_t delay = wq_reduce_pool_window.abstime;
	uint16_t idle = wq->wq_thidlecount;

	/*
	 * If we have less than wq_death_max_load threads, have a 5s timer.
	 *
	 * For the next wq_max_constrained_threads ones, decay linearly
	 * from 5s to 50ms.
	 */
	if (idle <= wq_death_max_load) {
		return delay;
	}

	/* Scale the delay down by how far past wq_death_max_load we are. */
	if (wq_max_constrained_threads > idle - wq_death_max_load) {
		delay *= (wq_max_constrained_threads - (idle - wq_death_max_load));
	}
	return delay / wq_max_constrained_threads;
}
884*4f1223e8SApple OSS Distributions
885*4f1223e8SApple OSS Distributions static inline bool
workq_should_kill_idle_thread(struct workqueue * wq,struct uthread * uth,uint64_t now)886*4f1223e8SApple OSS Distributions workq_should_kill_idle_thread(struct workqueue *wq, struct uthread *uth,
887*4f1223e8SApple OSS Distributions uint64_t now)
888*4f1223e8SApple OSS Distributions {
889*4f1223e8SApple OSS Distributions uint64_t delay = workq_kill_delay_for_idle_thread(wq);
890*4f1223e8SApple OSS Distributions return now - uth->uu_save.uus_workq_park_data.idle_stamp > delay;
891*4f1223e8SApple OSS Distributions }
892*4f1223e8SApple OSS Distributions
/*
 * Arm the workqueue's death thread-call to fire at `deadline`, unless the
 * workqueue is exiting or a call is already scheduled.
 *
 * NOTE(review): the load-then-or on wq_flags is not a single atomic
 * operation; presumably callers hold the workq lock, which would make the
 * check-and-set safe — confirm against callers.
 */
static void
workq_death_call_schedule(struct workqueue *wq, uint64_t deadline)
{
	uint32_t wq_flags = os_atomic_load(&wq->wq_flags, relaxed);

	if (wq_flags & (WQ_EXITING | WQ_DEATH_CALL_SCHEDULED)) {
		return;
	}
	os_atomic_or(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);

	WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_NONE, wq, 1, 0, 0);

	/*
	 * <rdar://problem/13139182> Due to how long term timers work, the leeway
	 * can't be too short, so use 500ms which is long enough that we will not
	 * wake up the CPU for killing threads, but short enough that it doesn't
	 * fall into long-term timer list shenanigans.
	 */
	thread_call_enter_delayed_with_leeway(wq->wq_death_call, NULL, deadline,
	    wq_reduce_pool_window.abstime / 10,
	    THREAD_CALL_DELAY_LEEWAY | THREAD_CALL_DELAY_USER_BACKGROUND);
}
915*4f1223e8SApple OSS Distributions
/*
 * `decrement` is set to the number of threads that are no longer dying:
 * - because they have been resuscitated just in time (workq_pop_idle_thread)
 * - or have been killed (workq_thread_terminate).
 *
 * Once no threads are in the process of dying, decide whether the oldest
 * killable idle thread should die now (mark it dying and wake it) or later
 * (arm the death call for when its kill delay expires).
 */
static void
workq_death_policy_evaluate(struct workqueue *wq, uint16_t decrement)
{
	struct uthread *uth;

	assert(wq->wq_thdying_count >= decrement);
	/* Other threads are still dying; re-evaluate when they finish. */
	if ((wq->wq_thdying_count -= decrement) > 0) {
		return;
	}

	/* Always keep at least one idle thread around. */
	if (wq->wq_thidlecount <= 1) {
		return;
	}

	if ((uth = workq_oldest_killable_idle_thread(wq)) == NULL) {
		return;
	}

	uint64_t now = mach_absolute_time();
	uint64_t delay = workq_kill_delay_for_idle_thread(wq);

	if (now - uth->uu_save.uus_workq_park_data.idle_stamp > delay) {
		/* Idle long enough: mark it dying and wake it so it can exit. */
		WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
		    wq, wq->wq_thidlecount, 0, 0);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
		if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) == 0) {
			workq_thread_wakeup(uth);
		}
		return;
	}

	/* Not idle long enough yet: check again when its delay expires. */
	workq_death_call_schedule(wq,
	    uth->uu_save.uus_workq_park_data.idle_stamp + delay);
}
956*4f1223e8SApple OSS Distributions
957*4f1223e8SApple OSS Distributions void
workq_thread_terminate(struct proc * p,struct uthread * uth)958*4f1223e8SApple OSS Distributions workq_thread_terminate(struct proc *p, struct uthread *uth)
959*4f1223e8SApple OSS Distributions {
960*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
961*4f1223e8SApple OSS Distributions
962*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
963*4f1223e8SApple OSS Distributions if (!workq_thread_is_permanently_bound(uth)) {
964*4f1223e8SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
965*4f1223e8SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
966*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_END,
967*4f1223e8SApple OSS Distributions wq, wq->wq_thidlecount, 0, 0);
968*4f1223e8SApple OSS Distributions workq_death_policy_evaluate(wq, 1);
969*4f1223e8SApple OSS Distributions }
970*4f1223e8SApple OSS Distributions }
971*4f1223e8SApple OSS Distributions if (wq->wq_nthreads-- == wq_max_threads) {
972*4f1223e8SApple OSS Distributions /*
973*4f1223e8SApple OSS Distributions * We got under the thread limit again, which may have prevented
974*4f1223e8SApple OSS Distributions * thread creation from happening, redrive if there are pending requests
975*4f1223e8SApple OSS Distributions */
976*4f1223e8SApple OSS Distributions if (wq->wq_reqcount) {
977*4f1223e8SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
978*4f1223e8SApple OSS Distributions }
979*4f1223e8SApple OSS Distributions }
980*4f1223e8SApple OSS Distributions workq_unlock(wq);
981*4f1223e8SApple OSS Distributions
982*4f1223e8SApple OSS Distributions thread_deallocate(get_machthread(uth));
983*4f1223e8SApple OSS Distributions }
984*4f1223e8SApple OSS Distributions
985*4f1223e8SApple OSS Distributions static void
workq_kill_old_threads_call(void * param0,void * param1 __unused)986*4f1223e8SApple OSS Distributions workq_kill_old_threads_call(void *param0, void *param1 __unused)
987*4f1223e8SApple OSS Distributions {
988*4f1223e8SApple OSS Distributions struct workqueue *wq = param0;
989*4f1223e8SApple OSS Distributions
990*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
991*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_START, wq, 0, 0, 0);
992*4f1223e8SApple OSS Distributions os_atomic_andnot(&wq->wq_flags, WQ_DEATH_CALL_SCHEDULED, relaxed);
993*4f1223e8SApple OSS Distributions workq_death_policy_evaluate(wq, 0);
994*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_death_call | DBG_FUNC_END, wq, 0, 0, 0);
995*4f1223e8SApple OSS Distributions workq_unlock(wq);
996*4f1223e8SApple OSS Distributions }
997*4f1223e8SApple OSS Distributions
998*4f1223e8SApple OSS Distributions static struct uthread *
workq_pop_idle_thread(struct workqueue * wq,uint16_t uu_flags,bool * needs_wakeup)999*4f1223e8SApple OSS Distributions workq_pop_idle_thread(struct workqueue *wq, uint16_t uu_flags,
1000*4f1223e8SApple OSS Distributions bool *needs_wakeup)
1001*4f1223e8SApple OSS Distributions {
1002*4f1223e8SApple OSS Distributions struct uthread *uth;
1003*4f1223e8SApple OSS Distributions
1004*4f1223e8SApple OSS Distributions if ((uth = TAILQ_FIRST(&wq->wq_thidlelist))) {
1005*4f1223e8SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
1006*4f1223e8SApple OSS Distributions } else {
1007*4f1223e8SApple OSS Distributions uth = TAILQ_FIRST(&wq->wq_thnewlist);
1008*4f1223e8SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
1009*4f1223e8SApple OSS Distributions }
1010*4f1223e8SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);
1011*4f1223e8SApple OSS Distributions
1012*4f1223e8SApple OSS Distributions assert((uth->uu_workq_flags & UT_WORKQ_RUNNING) == 0);
1013*4f1223e8SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_RUNNING | uu_flags;
1014*4f1223e8SApple OSS Distributions
1015*4f1223e8SApple OSS Distributions /* A thread is never woken up as part of the cooperative pool */
1016*4f1223e8SApple OSS Distributions assert((uu_flags & UT_WORKQ_COOPERATIVE) == 0);
1017*4f1223e8SApple OSS Distributions
1018*4f1223e8SApple OSS Distributions if ((uu_flags & UT_WORKQ_OVERCOMMIT) == 0) {
1019*4f1223e8SApple OSS Distributions wq->wq_constrained_threads_scheduled++;
1020*4f1223e8SApple OSS Distributions }
1021*4f1223e8SApple OSS Distributions wq->wq_threads_scheduled++;
1022*4f1223e8SApple OSS Distributions wq->wq_thidlecount--;
1023*4f1223e8SApple OSS Distributions
1024*4f1223e8SApple OSS Distributions if (__improbable(uth->uu_workq_flags & UT_WORKQ_DYING)) {
1025*4f1223e8SApple OSS Distributions uth->uu_workq_flags ^= UT_WORKQ_DYING;
1026*4f1223e8SApple OSS Distributions workq_death_policy_evaluate(wq, 1);
1027*4f1223e8SApple OSS Distributions *needs_wakeup = false;
1028*4f1223e8SApple OSS Distributions } else if (uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) {
1029*4f1223e8SApple OSS Distributions *needs_wakeup = false;
1030*4f1223e8SApple OSS Distributions } else {
1031*4f1223e8SApple OSS Distributions *needs_wakeup = true;
1032*4f1223e8SApple OSS Distributions }
1033*4f1223e8SApple OSS Distributions return uth;
1034*4f1223e8SApple OSS Distributions }
1035*4f1223e8SApple OSS Distributions
1036*4f1223e8SApple OSS Distributions /*
1037*4f1223e8SApple OSS Distributions * Called by thread_create_workq_waiting() during thread initialization, before
1038*4f1223e8SApple OSS Distributions * assert_wait, before the thread has been started.
1039*4f1223e8SApple OSS Distributions */
1040*4f1223e8SApple OSS Distributions event_t
workq_thread_init_and_wq_lock(task_t task,thread_t th)1041*4f1223e8SApple OSS Distributions workq_thread_init_and_wq_lock(task_t task, thread_t th)
1042*4f1223e8SApple OSS Distributions {
1043*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
1044*4f1223e8SApple OSS Distributions
1045*4f1223e8SApple OSS Distributions uth->uu_workq_flags = UT_WORKQ_NEW;
1046*4f1223e8SApple OSS Distributions uth->uu_workq_pri = WORKQ_POLICY_INIT(THREAD_QOS_LEGACY);
1047*4f1223e8SApple OSS Distributions uth->uu_workq_thport = MACH_PORT_NULL;
1048*4f1223e8SApple OSS Distributions uth->uu_workq_stackaddr = 0;
1049*4f1223e8SApple OSS Distributions uth->uu_workq_pthread_kill_allowed = 0;
1050*4f1223e8SApple OSS Distributions
1051*4f1223e8SApple OSS Distributions thread_set_tag(th, THREAD_TAG_PTHREAD | THREAD_TAG_WORKQUEUE);
1052*4f1223e8SApple OSS Distributions thread_reset_workq_qos(th, THREAD_QOS_LEGACY);
1053*4f1223e8SApple OSS Distributions
1054*4f1223e8SApple OSS Distributions workq_lock_spin(proc_get_wqptr_fast(get_bsdtask_info(task)));
1055*4f1223e8SApple OSS Distributions return workq_parked_wait_event(uth);
1056*4f1223e8SApple OSS Distributions }
1057*4f1223e8SApple OSS Distributions
1058*4f1223e8SApple OSS Distributions /**
1059*4f1223e8SApple OSS Distributions * Try to add a new workqueue thread.
1060*4f1223e8SApple OSS Distributions *
1061*4f1223e8SApple OSS Distributions * - called with workq lock held
1062*4f1223e8SApple OSS Distributions * - dropped and retaken around thread creation
1063*4f1223e8SApple OSS Distributions * - return with workq lock held
1064*4f1223e8SApple OSS Distributions */
1065*4f1223e8SApple OSS Distributions static kern_return_t
workq_add_new_idle_thread(proc_t p,struct workqueue * wq,thread_continue_t continuation,bool is_permanently_bound,thread_t * new_thread)1066*4f1223e8SApple OSS Distributions workq_add_new_idle_thread(
1067*4f1223e8SApple OSS Distributions proc_t p,
1068*4f1223e8SApple OSS Distributions struct workqueue *wq,
1069*4f1223e8SApple OSS Distributions thread_continue_t continuation,
1070*4f1223e8SApple OSS Distributions bool is_permanently_bound,
1071*4f1223e8SApple OSS Distributions thread_t *new_thread)
1072*4f1223e8SApple OSS Distributions {
1073*4f1223e8SApple OSS Distributions mach_vm_offset_t th_stackaddr;
1074*4f1223e8SApple OSS Distributions kern_return_t kret;
1075*4f1223e8SApple OSS Distributions thread_t th;
1076*4f1223e8SApple OSS Distributions
1077*4f1223e8SApple OSS Distributions wq->wq_nthreads++;
1078*4f1223e8SApple OSS Distributions
1079*4f1223e8SApple OSS Distributions workq_unlock(wq);
1080*4f1223e8SApple OSS Distributions
1081*4f1223e8SApple OSS Distributions vm_map_t vmap = get_task_map(proc_task(p));
1082*4f1223e8SApple OSS Distributions
1083*4f1223e8SApple OSS Distributions kret = pthread_functions->workq_create_threadstack(p, vmap, &th_stackaddr);
1084*4f1223e8SApple OSS Distributions if (kret != KERN_SUCCESS) {
1085*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
1086*4f1223e8SApple OSS Distributions kret, 1, 0);
1087*4f1223e8SApple OSS Distributions goto out;
1088*4f1223e8SApple OSS Distributions }
1089*4f1223e8SApple OSS Distributions
1090*4f1223e8SApple OSS Distributions kret = thread_create_workq_waiting(proc_task(p),
1091*4f1223e8SApple OSS Distributions continuation,
1092*4f1223e8SApple OSS Distributions &th,
1093*4f1223e8SApple OSS Distributions is_permanently_bound);
1094*4f1223e8SApple OSS Distributions if (kret != KERN_SUCCESS) {
1095*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_create_failed | DBG_FUNC_NONE, wq,
1096*4f1223e8SApple OSS Distributions kret, 0, 0);
1097*4f1223e8SApple OSS Distributions pthread_functions->workq_destroy_threadstack(p, vmap, th_stackaddr);
1098*4f1223e8SApple OSS Distributions goto out;
1099*4f1223e8SApple OSS Distributions }
1100*4f1223e8SApple OSS Distributions
1101*4f1223e8SApple OSS Distributions // thread_create_workq_waiting() will return with the wq lock held
1102*4f1223e8SApple OSS Distributions // on success, because it calls workq_thread_init_and_wq_lock() above
1103*4f1223e8SApple OSS Distributions
1104*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
1105*4f1223e8SApple OSS Distributions uth->uu_workq_stackaddr = (user_addr_t)th_stackaddr;
1106*4f1223e8SApple OSS Distributions
1107*4f1223e8SApple OSS Distributions wq->wq_creations++;
1108*4f1223e8SApple OSS Distributions if (!is_permanently_bound) {
1109*4f1223e8SApple OSS Distributions wq->wq_thidlecount++;
1110*4f1223e8SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
1111*4f1223e8SApple OSS Distributions }
1112*4f1223e8SApple OSS Distributions
1113*4f1223e8SApple OSS Distributions if (new_thread) {
1114*4f1223e8SApple OSS Distributions *new_thread = th;
1115*4f1223e8SApple OSS Distributions }
1116*4f1223e8SApple OSS Distributions
1117*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_create | DBG_FUNC_NONE, wq, 0, 0, 0);
1118*4f1223e8SApple OSS Distributions return kret;
1119*4f1223e8SApple OSS Distributions
1120*4f1223e8SApple OSS Distributions out:
1121*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
1122*4f1223e8SApple OSS Distributions /*
1123*4f1223e8SApple OSS Distributions * Do not redrive here if we went under wq_max_threads again,
1124*4f1223e8SApple OSS Distributions * it is the responsibility of the callers of this function
1125*4f1223e8SApple OSS Distributions * to do so when it fails.
1126*4f1223e8SApple OSS Distributions */
1127*4f1223e8SApple OSS Distributions wq->wq_nthreads--;
1128*4f1223e8SApple OSS Distributions return kret;
1129*4f1223e8SApple OSS Distributions }
1130*4f1223e8SApple OSS Distributions
1131*4f1223e8SApple OSS Distributions static inline bool
workq_thread_is_overcommit(struct uthread * uth)1132*4f1223e8SApple OSS Distributions workq_thread_is_overcommit(struct uthread *uth)
1133*4f1223e8SApple OSS Distributions {
1134*4f1223e8SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_OVERCOMMIT) != 0;
1135*4f1223e8SApple OSS Distributions }
1136*4f1223e8SApple OSS Distributions
1137*4f1223e8SApple OSS Distributions static inline bool
workq_thread_is_nonovercommit(struct uthread * uth)1138*4f1223e8SApple OSS Distributions workq_thread_is_nonovercommit(struct uthread *uth)
1139*4f1223e8SApple OSS Distributions {
1140*4f1223e8SApple OSS Distributions return (uth->uu_workq_flags & (UT_WORKQ_OVERCOMMIT |
1141*4f1223e8SApple OSS Distributions UT_WORKQ_COOPERATIVE)) == 0;
1142*4f1223e8SApple OSS Distributions }
1143*4f1223e8SApple OSS Distributions
1144*4f1223e8SApple OSS Distributions static inline bool
workq_thread_is_cooperative(struct uthread * uth)1145*4f1223e8SApple OSS Distributions workq_thread_is_cooperative(struct uthread *uth)
1146*4f1223e8SApple OSS Distributions {
1147*4f1223e8SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_COOPERATIVE) != 0;
1148*4f1223e8SApple OSS Distributions }
1149*4f1223e8SApple OSS Distributions
1150*4f1223e8SApple OSS Distributions bool
workq_thread_is_permanently_bound(struct uthread * uth)1151*4f1223e8SApple OSS Distributions workq_thread_is_permanently_bound(struct uthread *uth)
1152*4f1223e8SApple OSS Distributions {
1153*4f1223e8SApple OSS Distributions return (uth->uu_workq_flags & UT_WORKQ_PERMANENT_BIND) != 0;
1154*4f1223e8SApple OSS Distributions }
1155*4f1223e8SApple OSS Distributions
1156*4f1223e8SApple OSS Distributions static inline void
workq_thread_set_type(struct uthread * uth,uint16_t flags)1157*4f1223e8SApple OSS Distributions workq_thread_set_type(struct uthread *uth, uint16_t flags)
1158*4f1223e8SApple OSS Distributions {
1159*4f1223e8SApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
1160*4f1223e8SApple OSS Distributions uth->uu_workq_flags |= flags;
1161*4f1223e8SApple OSS Distributions }
1162*4f1223e8SApple OSS Distributions
1163*4f1223e8SApple OSS Distributions
1164*4f1223e8SApple OSS Distributions #define WORKQ_UNPARK_FOR_DEATH_WAS_IDLE 0x1
1165*4f1223e8SApple OSS Distributions
1166*4f1223e8SApple OSS Distributions __attribute__((noreturn, noinline))
1167*4f1223e8SApple OSS Distributions static void
workq_unpark_for_death_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t death_flags,uint32_t setup_flags)1168*4f1223e8SApple OSS Distributions workq_unpark_for_death_and_unlock(proc_t p, struct workqueue *wq,
1169*4f1223e8SApple OSS Distributions struct uthread *uth, uint32_t death_flags, uint32_t setup_flags)
1170*4f1223e8SApple OSS Distributions {
1171*4f1223e8SApple OSS Distributions thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
1172*4f1223e8SApple OSS Distributions bool first_use = uth->uu_workq_flags & UT_WORKQ_NEW;
1173*4f1223e8SApple OSS Distributions
1174*4f1223e8SApple OSS Distributions if (qos > WORKQ_THREAD_QOS_CLEANUP) {
1175*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
1176*4f1223e8SApple OSS Distributions qos = WORKQ_THREAD_QOS_CLEANUP;
1177*4f1223e8SApple OSS Distributions }
1178*4f1223e8SApple OSS Distributions
1179*4f1223e8SApple OSS Distributions workq_thread_reset_cpupercent(NULL, uth);
1180*4f1223e8SApple OSS Distributions
1181*4f1223e8SApple OSS Distributions if (death_flags & WORKQ_UNPARK_FOR_DEATH_WAS_IDLE) {
1182*4f1223e8SApple OSS Distributions wq->wq_thidlecount--;
1183*4f1223e8SApple OSS Distributions if (first_use) {
1184*4f1223e8SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thnewlist, uth, uu_workq_entry);
1185*4f1223e8SApple OSS Distributions } else {
1186*4f1223e8SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thidlelist, uth, uu_workq_entry);
1187*4f1223e8SApple OSS Distributions }
1188*4f1223e8SApple OSS Distributions }
1189*4f1223e8SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thrunlist, uth, uu_workq_entry);
1190*4f1223e8SApple OSS Distributions
1191*4f1223e8SApple OSS Distributions workq_unlock(wq);
1192*4f1223e8SApple OSS Distributions
1193*4f1223e8SApple OSS Distributions if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
1194*4f1223e8SApple OSS Distributions __assert_only kern_return_t kr;
1195*4f1223e8SApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
1196*4f1223e8SApple OSS Distributions assert(kr == KERN_SUCCESS);
1197*4f1223e8SApple OSS Distributions }
1198*4f1223e8SApple OSS Distributions
1199*4f1223e8SApple OSS Distributions uint32_t flags = WQ_FLAG_THREAD_NEWSPI | qos | WQ_FLAG_THREAD_PRIO_QOS;
1200*4f1223e8SApple OSS Distributions thread_t th = get_machthread(uth);
1201*4f1223e8SApple OSS Distributions vm_map_t vmap = get_task_map(proc_task(p));
1202*4f1223e8SApple OSS Distributions
1203*4f1223e8SApple OSS Distributions if (!first_use) {
1204*4f1223e8SApple OSS Distributions flags |= WQ_FLAG_THREAD_REUSE;
1205*4f1223e8SApple OSS Distributions }
1206*4f1223e8SApple OSS Distributions
1207*4f1223e8SApple OSS Distributions pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
1208*4f1223e8SApple OSS Distributions uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, flags);
1209*4f1223e8SApple OSS Distributions __builtin_unreachable();
1210*4f1223e8SApple OSS Distributions }
1211*4f1223e8SApple OSS Distributions
1212*4f1223e8SApple OSS Distributions bool
workq_is_current_thread_updating_turnstile(struct workqueue * wq)1213*4f1223e8SApple OSS Distributions workq_is_current_thread_updating_turnstile(struct workqueue *wq)
1214*4f1223e8SApple OSS Distributions {
1215*4f1223e8SApple OSS Distributions return wq->wq_turnstile_updater == current_thread();
1216*4f1223e8SApple OSS Distributions }
1217*4f1223e8SApple OSS Distributions
1218*4f1223e8SApple OSS Distributions __attribute__((always_inline))
1219*4f1223e8SApple OSS Distributions static inline void
1220*4f1223e8SApple OSS Distributions workq_perform_turnstile_operation_locked(struct workqueue *wq,
1221*4f1223e8SApple OSS Distributions void (^operation)(void))
1222*4f1223e8SApple OSS Distributions {
1223*4f1223e8SApple OSS Distributions workq_lock_held(wq);
1224*4f1223e8SApple OSS Distributions wq->wq_turnstile_updater = current_thread();
1225*4f1223e8SApple OSS Distributions operation();
1226*4f1223e8SApple OSS Distributions wq->wq_turnstile_updater = THREAD_NULL;
1227*4f1223e8SApple OSS Distributions }
1228*4f1223e8SApple OSS Distributions
1229*4f1223e8SApple OSS Distributions static void
workq_turnstile_update_inheritor(struct workqueue * wq,turnstile_inheritor_t inheritor,turnstile_update_flags_t flags)1230*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(struct workqueue *wq,
1231*4f1223e8SApple OSS Distributions turnstile_inheritor_t inheritor,
1232*4f1223e8SApple OSS Distributions turnstile_update_flags_t flags)
1233*4f1223e8SApple OSS Distributions {
1234*4f1223e8SApple OSS Distributions if (wq->wq_inheritor == inheritor) {
1235*4f1223e8SApple OSS Distributions return;
1236*4f1223e8SApple OSS Distributions }
1237*4f1223e8SApple OSS Distributions wq->wq_inheritor = inheritor;
1238*4f1223e8SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
1239*4f1223e8SApple OSS Distributions turnstile_update_inheritor(wq->wq_turnstile, inheritor,
1240*4f1223e8SApple OSS Distributions flags | TURNSTILE_IMMEDIATE_UPDATE);
1241*4f1223e8SApple OSS Distributions turnstile_update_inheritor_complete(wq->wq_turnstile,
1242*4f1223e8SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
1243*4f1223e8SApple OSS Distributions });
1244*4f1223e8SApple OSS Distributions }
1245*4f1223e8SApple OSS Distributions
1246*4f1223e8SApple OSS Distributions static void
workq_push_idle_thread(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)1247*4f1223e8SApple OSS Distributions workq_push_idle_thread(proc_t p, struct workqueue *wq, struct uthread *uth,
1248*4f1223e8SApple OSS Distributions uint32_t setup_flags)
1249*4f1223e8SApple OSS Distributions {
1250*4f1223e8SApple OSS Distributions uint64_t now = mach_absolute_time();
1251*4f1223e8SApple OSS Distributions bool is_creator = (uth == wq->wq_creator);
1252*4f1223e8SApple OSS Distributions
1253*4f1223e8SApple OSS Distributions if (workq_thread_is_cooperative(uth)) {
1254*4f1223e8SApple OSS Distributions assert(!is_creator);
1255*4f1223e8SApple OSS Distributions
1256*4f1223e8SApple OSS Distributions thread_qos_t thread_qos = uth->uu_workq_pri.qos_req;
1257*4f1223e8SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, thread_qos);
1258*4f1223e8SApple OSS Distributions
1259*4f1223e8SApple OSS Distributions /* Before we get here, we always go through
1260*4f1223e8SApple OSS Distributions * workq_select_threadreq_or_park_and_unlock. If we got here, it means
1261*4f1223e8SApple OSS Distributions * that we went through the logic in workq_threadreq_select which
1262*4f1223e8SApple OSS Distributions * did the refresh for the next best cooperative qos while
1263*4f1223e8SApple OSS Distributions * excluding the current thread - we shouldn't need to do it again.
1264*4f1223e8SApple OSS Distributions */
1265*4f1223e8SApple OSS Distributions assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
1266*4f1223e8SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth)) {
1267*4f1223e8SApple OSS Distributions assert(!is_creator);
1268*4f1223e8SApple OSS Distributions
1269*4f1223e8SApple OSS Distributions wq->wq_constrained_threads_scheduled--;
1270*4f1223e8SApple OSS Distributions }
1271*4f1223e8SApple OSS Distributions
1272*4f1223e8SApple OSS Distributions uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING | UT_WORKQ_OVERCOMMIT | UT_WORKQ_COOPERATIVE);
1273*4f1223e8SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thrunlist, uth, uu_workq_entry);
1274*4f1223e8SApple OSS Distributions wq->wq_threads_scheduled--;
1275*4f1223e8SApple OSS Distributions
1276*4f1223e8SApple OSS Distributions if (is_creator) {
1277*4f1223e8SApple OSS Distributions wq->wq_creator = NULL;
1278*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 3, 0,
1279*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields);
1280*4f1223e8SApple OSS Distributions }
1281*4f1223e8SApple OSS Distributions
1282*4f1223e8SApple OSS Distributions if (wq->wq_inheritor == get_machthread(uth)) {
1283*4f1223e8SApple OSS Distributions assert(wq->wq_creator == NULL);
1284*4f1223e8SApple OSS Distributions if (wq->wq_reqcount) {
1285*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
1286*4f1223e8SApple OSS Distributions } else {
1287*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
1288*4f1223e8SApple OSS Distributions }
1289*4f1223e8SApple OSS Distributions }
1290*4f1223e8SApple OSS Distributions
1291*4f1223e8SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_NEW) {
1292*4f1223e8SApple OSS Distributions assert(is_creator || (_wq_flags(wq) & WQ_EXITING));
1293*4f1223e8SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thnewlist, uth, uu_workq_entry);
1294*4f1223e8SApple OSS Distributions wq->wq_thidlecount++;
1295*4f1223e8SApple OSS Distributions return;
1296*4f1223e8SApple OSS Distributions }
1297*4f1223e8SApple OSS Distributions
1298*4f1223e8SApple OSS Distributions if (!is_creator) {
1299*4f1223e8SApple OSS Distributions _wq_thactive_dec(wq, uth->uu_workq_pri.qos_bucket);
1300*4f1223e8SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(uth->uu_workq_pri.qos_bucket)]--;
1301*4f1223e8SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_IDLE_CLEANUP;
1302*4f1223e8SApple OSS Distributions }
1303*4f1223e8SApple OSS Distributions
1304*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.idle_stamp = now;
1305*4f1223e8SApple OSS Distributions
1306*4f1223e8SApple OSS Distributions struct uthread *oldest = workq_oldest_killable_idle_thread(wq);
1307*4f1223e8SApple OSS Distributions uint16_t cur_idle = wq->wq_thidlecount;
1308*4f1223e8SApple OSS Distributions
1309*4f1223e8SApple OSS Distributions if (cur_idle >= wq_max_constrained_threads ||
1310*4f1223e8SApple OSS Distributions (wq->wq_thdying_count == 0 && oldest &&
1311*4f1223e8SApple OSS Distributions workq_should_kill_idle_thread(wq, oldest, now))) {
1312*4f1223e8SApple OSS Distributions /*
1313*4f1223e8SApple OSS Distributions * Immediately kill threads if we have too may of them.
1314*4f1223e8SApple OSS Distributions *
1315*4f1223e8SApple OSS Distributions * And swap "place" with the oldest one we'd have woken up.
1316*4f1223e8SApple OSS Distributions * This is a relatively desperate situation where we really
1317*4f1223e8SApple OSS Distributions * need to kill threads quickly and it's best to kill
1318*4f1223e8SApple OSS Distributions * the one that's currently on core than context switching.
1319*4f1223e8SApple OSS Distributions */
1320*4f1223e8SApple OSS Distributions if (oldest) {
1321*4f1223e8SApple OSS Distributions oldest->uu_save.uus_workq_park_data.idle_stamp = now;
1322*4f1223e8SApple OSS Distributions TAILQ_REMOVE(&wq->wq_thidlelist, oldest, uu_workq_entry);
1323*4f1223e8SApple OSS Distributions TAILQ_INSERT_HEAD(&wq->wq_thidlelist, oldest, uu_workq_entry);
1324*4f1223e8SApple OSS Distributions }
1325*4f1223e8SApple OSS Distributions
1326*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_terminate | DBG_FUNC_START,
1327*4f1223e8SApple OSS Distributions wq, cur_idle, 0, 0);
1328*4f1223e8SApple OSS Distributions wq->wq_thdying_count++;
1329*4f1223e8SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_DYING;
1330*4f1223e8SApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
1331*4f1223e8SApple OSS Distributions workq_unpark_for_death_and_unlock(p, wq, uth, 0, setup_flags);
1332*4f1223e8SApple OSS Distributions __builtin_unreachable();
1333*4f1223e8SApple OSS Distributions }
1334*4f1223e8SApple OSS Distributions
1335*4f1223e8SApple OSS Distributions struct uthread *tail = TAILQ_LAST(&wq->wq_thidlelist, workq_uthread_head);
1336*4f1223e8SApple OSS Distributions
1337*4f1223e8SApple OSS Distributions cur_idle += 1;
1338*4f1223e8SApple OSS Distributions wq->wq_thidlecount = cur_idle;
1339*4f1223e8SApple OSS Distributions
1340*4f1223e8SApple OSS Distributions if (cur_idle >= wq_death_max_load && tail &&
1341*4f1223e8SApple OSS Distributions tail->uu_save.uus_workq_park_data.has_stack) {
1342*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.has_stack = false;
1343*4f1223e8SApple OSS Distributions TAILQ_INSERT_TAIL(&wq->wq_thidlelist, uth, uu_workq_entry);
1344*4f1223e8SApple OSS Distributions } else {
1345*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.has_stack = true;
1346*4f1223e8SApple OSS Distributions TAILQ_INSERT_HEAD(&wq->wq_thidlelist, uth, uu_workq_entry);
1347*4f1223e8SApple OSS Distributions }
1348*4f1223e8SApple OSS Distributions
1349*4f1223e8SApple OSS Distributions if (!tail) {
1350*4f1223e8SApple OSS Distributions uint64_t delay = workq_kill_delay_for_idle_thread(wq);
1351*4f1223e8SApple OSS Distributions workq_death_call_schedule(wq, now + delay);
1352*4f1223e8SApple OSS Distributions }
1353*4f1223e8SApple OSS Distributions }
1354*4f1223e8SApple OSS Distributions
1355*4f1223e8SApple OSS Distributions #pragma mark thread requests
1356*4f1223e8SApple OSS Distributions
1357*4f1223e8SApple OSS Distributions static inline bool
workq_tr_is_overcommit(workq_tr_flags_t tr_flags)1358*4f1223e8SApple OSS Distributions workq_tr_is_overcommit(workq_tr_flags_t tr_flags)
1359*4f1223e8SApple OSS Distributions {
1360*4f1223e8SApple OSS Distributions return (tr_flags & WORKQ_TR_FLAG_OVERCOMMIT) != 0;
1361*4f1223e8SApple OSS Distributions }
1362*4f1223e8SApple OSS Distributions
1363*4f1223e8SApple OSS Distributions static inline bool
workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)1364*4f1223e8SApple OSS Distributions workq_tr_is_nonovercommit(workq_tr_flags_t tr_flags)
1365*4f1223e8SApple OSS Distributions {
1366*4f1223e8SApple OSS Distributions return (tr_flags & (WORKQ_TR_FLAG_OVERCOMMIT |
1367*4f1223e8SApple OSS Distributions WORKQ_TR_FLAG_COOPERATIVE |
1368*4f1223e8SApple OSS Distributions WORKQ_TR_FLAG_PERMANENT_BIND)) == 0;
1369*4f1223e8SApple OSS Distributions }
1370*4f1223e8SApple OSS Distributions
1371*4f1223e8SApple OSS Distributions static inline bool
workq_tr_is_cooperative(workq_tr_flags_t tr_flags)1372*4f1223e8SApple OSS Distributions workq_tr_is_cooperative(workq_tr_flags_t tr_flags)
1373*4f1223e8SApple OSS Distributions {
1374*4f1223e8SApple OSS Distributions return (tr_flags & WORKQ_TR_FLAG_COOPERATIVE) != 0;
1375*4f1223e8SApple OSS Distributions }
1376*4f1223e8SApple OSS Distributions
1377*4f1223e8SApple OSS Distributions #define workq_threadreq_is_overcommit(req) workq_tr_is_overcommit((req)->tr_flags)
1378*4f1223e8SApple OSS Distributions #define workq_threadreq_is_nonovercommit(req) workq_tr_is_nonovercommit((req)->tr_flags)
1379*4f1223e8SApple OSS Distributions #define workq_threadreq_is_cooperative(req) workq_tr_is_cooperative((req)->tr_flags)
1380*4f1223e8SApple OSS Distributions
1381*4f1223e8SApple OSS Distributions static inline int
workq_priority_for_req(workq_threadreq_t req)1382*4f1223e8SApple OSS Distributions workq_priority_for_req(workq_threadreq_t req)
1383*4f1223e8SApple OSS Distributions {
1384*4f1223e8SApple OSS Distributions thread_qos_t qos = req->tr_qos;
1385*4f1223e8SApple OSS Distributions
1386*4f1223e8SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1387*4f1223e8SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
1388*4f1223e8SApple OSS Distributions assert(trp.trp_flags & TRP_PRIORITY);
1389*4f1223e8SApple OSS Distributions return trp.trp_pri;
1390*4f1223e8SApple OSS Distributions }
1391*4f1223e8SApple OSS Distributions return thread_workq_pri_for_qos(qos);
1392*4f1223e8SApple OSS Distributions }
1393*4f1223e8SApple OSS Distributions
1394*4f1223e8SApple OSS Distributions static inline struct priority_queue_sched_max *
workq_priority_queue_for_req(struct workqueue * wq,workq_threadreq_t req)1395*4f1223e8SApple OSS Distributions workq_priority_queue_for_req(struct workqueue *wq, workq_threadreq_t req)
1396*4f1223e8SApple OSS Distributions {
1397*4f1223e8SApple OSS Distributions assert(!workq_tr_is_cooperative(req->tr_flags));
1398*4f1223e8SApple OSS Distributions
1399*4f1223e8SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
1400*4f1223e8SApple OSS Distributions return &wq->wq_special_queue;
1401*4f1223e8SApple OSS Distributions } else if (workq_tr_is_overcommit(req->tr_flags)) {
1402*4f1223e8SApple OSS Distributions return &wq->wq_overcommit_queue;
1403*4f1223e8SApple OSS Distributions } else {
1404*4f1223e8SApple OSS Distributions return &wq->wq_constrained_queue;
1405*4f1223e8SApple OSS Distributions }
1406*4f1223e8SApple OSS Distributions }
1407*4f1223e8SApple OSS Distributions
1408*4f1223e8SApple OSS Distributions /* Calculates the number of threads scheduled >= the input QoS */
1409*4f1223e8SApple OSS Distributions static uint64_t
workq_num_cooperative_threads_scheduled_to_qos_internal(struct workqueue * wq,thread_qos_t qos)1410*4f1223e8SApple OSS Distributions workq_num_cooperative_threads_scheduled_to_qos_internal(struct workqueue *wq, thread_qos_t qos)
1411*4f1223e8SApple OSS Distributions {
1412*4f1223e8SApple OSS Distributions uint64_t num_cooperative_threads = 0;
1413*4f1223e8SApple OSS Distributions
1414*4f1223e8SApple OSS Distributions for (thread_qos_t cur_qos = WORKQ_THREAD_QOS_MAX; cur_qos >= qos; cur_qos--) {
1415*4f1223e8SApple OSS Distributions uint8_t bucket = _wq_bucket(cur_qos);
1416*4f1223e8SApple OSS Distributions num_cooperative_threads += wq->wq_cooperative_queue_scheduled_count[bucket];
1417*4f1223e8SApple OSS Distributions }
1418*4f1223e8SApple OSS Distributions
1419*4f1223e8SApple OSS Distributions return num_cooperative_threads;
1420*4f1223e8SApple OSS Distributions }
1421*4f1223e8SApple OSS Distributions
1422*4f1223e8SApple OSS Distributions /* Calculates the number of threads scheduled >= the input QoS */
static uint64_t
workq_num_cooperative_threads_scheduled_to_qos_locked(struct workqueue *wq, thread_qos_t qos)
{
	/* Lock-held variant: asserts the workq lock, then defers to the
	 * internal counting helper. */
	workq_lock_held(wq);
	return workq_num_cooperative_threads_scheduled_to_qos_internal(wq, qos);
}
1429*4f1223e8SApple OSS Distributions
/* Total number of threads scheduled in the cooperative pool, across all QoS
 * buckets (i.e. everything at or above the minimum QoS). Workq lock held. */
static uint64_t
workq_num_cooperative_threads_scheduled_total(struct workqueue *wq)
{
	return workq_num_cooperative_threads_scheduled_to_qos_locked(wq, WORKQ_THREAD_QOS_MIN);
}
1435*4f1223e8SApple OSS Distributions
1436*4f1223e8SApple OSS Distributions static bool
workq_has_cooperative_thread_requests(struct workqueue * wq)1437*4f1223e8SApple OSS Distributions workq_has_cooperative_thread_requests(struct workqueue *wq)
1438*4f1223e8SApple OSS Distributions {
1439*4f1223e8SApple OSS Distributions for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
1440*4f1223e8SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
1441*4f1223e8SApple OSS Distributions if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
1442*4f1223e8SApple OSS Distributions return true;
1443*4f1223e8SApple OSS Distributions }
1444*4f1223e8SApple OSS Distributions }
1445*4f1223e8SApple OSS Distributions
1446*4f1223e8SApple OSS Distributions return false;
1447*4f1223e8SApple OSS Distributions }
1448*4f1223e8SApple OSS Distributions
1449*4f1223e8SApple OSS Distributions /*
 * Determines the QoS bucket we should service next in the cooperative
1451*4f1223e8SApple OSS Distributions * pool. This function will always return a QoS for cooperative pool as long as
1452*4f1223e8SApple OSS Distributions * there are requests to be serviced.
1453*4f1223e8SApple OSS Distributions *
1454*4f1223e8SApple OSS Distributions * Unlike the other thread pools, for the cooperative thread pool the schedule
1455*4f1223e8SApple OSS Distributions * counts for the various buckets in the pool affect the next best request for
1456*4f1223e8SApple OSS Distributions * it.
1457*4f1223e8SApple OSS Distributions *
1458*4f1223e8SApple OSS Distributions * This function is called in the following contexts:
1459*4f1223e8SApple OSS Distributions *
1460*4f1223e8SApple OSS Distributions * a) When determining the best thread QoS for cooperative bucket for the
1461*4f1223e8SApple OSS Distributions * creator/thread reuse
1462*4f1223e8SApple OSS Distributions *
1463*4f1223e8SApple OSS Distributions * b) Once (a) has happened and thread has bound to a thread request, figuring
1464*4f1223e8SApple OSS Distributions * out whether the next best request for this pool has changed so that creator
1465*4f1223e8SApple OSS Distributions * can be scheduled.
1466*4f1223e8SApple OSS Distributions *
1467*4f1223e8SApple OSS Distributions * Returns true if the cooperative queue's best qos changed from previous
1468*4f1223e8SApple OSS Distributions * value.
1469*4f1223e8SApple OSS Distributions */
static bool
_wq_cooperative_queue_refresh_best_req_qos(struct workqueue *wq)
{
	workq_lock_held(wq);

	/* Remember the previous answer so we can report whether it changed */
	thread_qos_t old_best_req_qos = wq->wq_cooperative_queue_best_req_qos;

	/* We determine the next best cooperative thread request based on the
	 * following:
	 *
	 * 1. Take the MAX of the following:
	 * a) Highest qos with pending TRs such that number of scheduled
	 * threads so far with >= qos is < wq_max_cooperative_threads
	 * b) Highest qos bucket with pending TRs but no scheduled threads for that bucket
	 *
	 * 2. If the result of (1) is UN, then we pick the highest priority amongst
	 * pending thread requests in the pool.
	 *
	 */
	thread_qos_t highest_qos_with_no_scheduled = THREAD_QOS_UNSPECIFIED;
	thread_qos_t highest_qos_req_with_width = THREAD_QOS_UNSPECIFIED;

	thread_qos_t highest_qos_req = THREAD_QOS_UNSPECIFIED;

	/* Running total of threads scheduled at >= the QoS currently being walked */
	int scheduled_count_till_qos = 0;

	/* Walk the buckets from highest QoS to lowest */
	for (thread_qos_t qos = WORKQ_THREAD_QOS_MAX; qos >= WORKQ_THREAD_QOS_MIN; qos--) {
		uint8_t bucket = _wq_bucket(qos);
		uint8_t scheduled_count_for_bucket = wq->wq_cooperative_queue_scheduled_count[bucket];
		scheduled_count_till_qos += scheduled_count_for_bucket;

		if (!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
			if (qos > highest_qos_req) {
				highest_qos_req = qos;
			}
			/*
			 * The pool isn't saturated for threads at and above this QoS, and
			 * this qos bucket has pending requests
			 */
			if (scheduled_count_till_qos < wq_cooperative_queue_max_size(wq)) {
				if (qos > highest_qos_req_with_width) {
					highest_qos_req_with_width = qos;
				}
			}

			/*
			 * There are no threads scheduled for this bucket but there
			 * is work pending, give it at least 1 thread
			 */
			if (scheduled_count_for_bucket == 0) {
				if (qos > highest_qos_with_no_scheduled) {
					highest_qos_with_no_scheduled = qos;
				}
			}
		}
	}

	/* Step (1) from the comment above: MAX of the two candidates */
	wq->wq_cooperative_queue_best_req_qos = MAX(highest_qos_with_no_scheduled, highest_qos_req_with_width);
	/* Step (2): fall back to the highest pending request regardless of widths */
	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
		wq->wq_cooperative_queue_best_req_qos = highest_qos_req;
	}

#if MACH_ASSERT
	/* Assert that if we are showing up the next best req as UN, then there
	 * actually is no thread request in the cooperative pool buckets */
	if (wq->wq_cooperative_queue_best_req_qos == THREAD_QOS_UNSPECIFIED) {
		assert(!workq_has_cooperative_thread_requests(wq));
	}
#endif

	return old_best_req_qos != wq->wq_cooperative_queue_best_req_qos;
}
1542*4f1223e8SApple OSS Distributions
1543*4f1223e8SApple OSS Distributions /*
1544*4f1223e8SApple OSS Distributions * Returns whether or not the input thread (or creator thread if uth is NULL)
1545*4f1223e8SApple OSS Distributions * should be allowed to work as part of the cooperative pool for the <input qos>
1546*4f1223e8SApple OSS Distributions * bucket.
1547*4f1223e8SApple OSS Distributions *
1548*4f1223e8SApple OSS Distributions * This function is called in a bunch of places:
1549*4f1223e8SApple OSS Distributions * a) Quantum expires for a thread and it is part of the cooperative pool
1550*4f1223e8SApple OSS Distributions * b) When trying to pick a thread request for the creator thread to
1551*4f1223e8SApple OSS Distributions * represent.
1552*4f1223e8SApple OSS Distributions * c) When a thread is trying to pick a thread request to actually bind to
1553*4f1223e8SApple OSS Distributions * and service.
1554*4f1223e8SApple OSS Distributions *
1555*4f1223e8SApple OSS Distributions * Called with workq lock held.
1556*4f1223e8SApple OSS Distributions */
1557*4f1223e8SApple OSS Distributions
1558*4f1223e8SApple OSS Distributions #define WQ_COOPERATIVE_POOL_UNSATURATED 1
1559*4f1223e8SApple OSS Distributions #define WQ_COOPERATIVE_BUCKET_UNSERVICED 2
1560*4f1223e8SApple OSS Distributions #define WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS 3
1561*4f1223e8SApple OSS Distributions
static bool
workq_cooperative_allowance(struct workqueue *wq, thread_qos_t qos, struct uthread *uth,
    bool may_start_timer)
{
	workq_lock_held(wq);

	bool exclude_thread_as_scheduled = false;
	bool passed_admissions = false;
	uint8_t bucket = _wq_bucket(qos);

	/* If the candidate thread is already part of the cooperative pool, drop
	 * it from the scheduled counts while running the admission checks so it
	 * does not count against itself; restored at `out`. */
	if (uth && workq_thread_is_cooperative(uth)) {
		exclude_thread_as_scheduled = true;
		_wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
	}

	/*
	 * We have not saturated the pool yet, let this thread continue
	 */
	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
	if (total_cooperative_threads < wq_cooperative_queue_max_size(wq)) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_POOL_UNSATURATED);
		goto out;
	}

	/*
	 * Without this thread, nothing is servicing the bucket which has pending
	 * work
	 */
	uint64_t bucket_scheduled = wq->wq_cooperative_queue_scheduled_count[bucket];
	if (bucket_scheduled == 0 &&
	    !STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket])) {
		passed_admissions = true;
		WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE,
		    total_cooperative_threads, qos, passed_admissions,
		    WQ_COOPERATIVE_BUCKET_UNSERVICED);
		goto out;
	}

	/*
	 * If number of threads at the QoS bucket >= input QoS exceeds the max we want
	 * for the pool, deny this thread
	 */
	uint64_t aggregate_down_to_qos = workq_num_cooperative_threads_scheduled_to_qos_locked(wq, qos);
	passed_admissions = (aggregate_down_to_qos < wq_cooperative_queue_max_size(wq));
	WQ_TRACE(TRACE_wq_cooperative_admission | DBG_FUNC_NONE, aggregate_down_to_qos,
	    qos, passed_admissions, WQ_COOPERATIVE_POOL_SATURATED_UP_TO_QOS);

	/* Denied: arrange for delayed thread creation so the pending work is
	 * not stranded, if the caller allows starting the timer. */
	if (!passed_admissions && may_start_timer) {
		workq_schedule_delayed_thread_creation(wq, 0);
	}

out:
	/* Undo the temporary exclusion applied at the top */
	if (exclude_thread_as_scheduled) {
		_wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
	}
	return passed_admissions;
}
1623*4f1223e8SApple OSS Distributions
1624*4f1223e8SApple OSS Distributions /*
1625*4f1223e8SApple OSS Distributions * returns true if the best request for the pool changed as a result of
1626*4f1223e8SApple OSS Distributions * enqueuing this thread request.
1627*4f1223e8SApple OSS Distributions */
static bool
workq_threadreq_enqueue(struct workqueue *wq, workq_threadreq_t req)
{
	assert(req->tr_state == WORKQ_TR_STATE_NEW);

	req->tr_state = WORKQ_TR_STATE_QUEUED;
	wq->wq_reqcount += req->tr_count;

	/* The manager request is a singleton: it lives in its own slot rather
	 * than any queue, and enqueueing it always changes the best request. */
	if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
		assert(wq->wq_event_manager_threadreq == NULL);
		assert(req->tr_flags & WORKQ_TR_FLAG_KEVENT);
		assert(req->tr_count == 1);
		wq->wq_event_manager_threadreq = req;
		return true;
	}

	/* Cooperative requests go in per-QoS FIFOs; whether the pool's best
	 * request changed depends on scheduled counts, so recompute it. */
	if (workq_threadreq_is_cooperative(req)) {
		assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
		assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);

		struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
		STAILQ_INSERT_TAIL(bucket, req, tr_link);

		return _wq_cooperative_queue_refresh_best_req_qos(wq);
	}

	/* All remaining kinds (special/overcommit/constrained) live in priority
	 * queues keyed on the request's scheduling priority. */
	struct priority_queue_sched_max *q = workq_priority_queue_for_req(wq, req);

	priority_queue_entry_set_sched_pri(q, &req->tr_entry,
	    workq_priority_for_req(req), false);

	/* A true return from the insert is treated as "this request is now the
	 * pool's best", which also requires refreshing the cached constrained
	 * QoS for non-overcommit requests. */
	if (priority_queue_insert(q, &req->tr_entry)) {
		if (workq_threadreq_is_nonovercommit(req)) {
			_wq_thactive_refresh_best_constrained_req_qos(wq);
		}
		return true;
	}
	return false;
}
1667*4f1223e8SApple OSS Distributions
1668*4f1223e8SApple OSS Distributions /*
1669*4f1223e8SApple OSS Distributions * returns true if one of the following is true (so as to update creator if
1670*4f1223e8SApple OSS Distributions * needed):
1671*4f1223e8SApple OSS Distributions *
1672*4f1223e8SApple OSS Distributions * (a) the next highest request of the pool we dequeued the request from changed
1673*4f1223e8SApple OSS Distributions * (b) the next highest requests of the pool the current thread used to be a
1674*4f1223e8SApple OSS Distributions * part of, changed
1675*4f1223e8SApple OSS Distributions *
1676*4f1223e8SApple OSS Distributions * For overcommit, special and constrained pools, the next highest QoS for each
 * pool is just a MAX of pending requests, so tracking (a) is sufficient.
1678*4f1223e8SApple OSS Distributions *
1679*4f1223e8SApple OSS Distributions * But for cooperative thread pool, the next highest QoS for the pool depends on
1680*4f1223e8SApple OSS Distributions * schedule counts in the pool as well. So if the current thread used to be
 * cooperative in its previous logical run, i.e. (b), then that can also affect
1682*4f1223e8SApple OSS Distributions * cooperative pool's next best QoS requests.
1683*4f1223e8SApple OSS Distributions */
static bool
workq_threadreq_dequeue(struct workqueue *wq, workq_threadreq_t req,
    bool cooperative_sched_count_changed)
{
	wq->wq_reqcount--;

	bool next_highest_request_changed = false;

	/* The request stays queued until its count drains to zero */
	if (--req->tr_count == 0) {
		if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
			assert(wq->wq_event_manager_threadreq == req);
			assert(req->tr_count == 0);
			wq->wq_event_manager_threadreq = NULL;

			/* If a cooperative thread was the one which picked up the manager
			 * thread request, we need to reevaluate the cooperative pool
			 * anyways.
			 */
			if (cooperative_sched_count_changed) {
				_wq_cooperative_queue_refresh_best_req_qos(wq);
			}
			return true;
		}

		if (workq_threadreq_is_cooperative(req)) {
			assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
			assert(req->tr_qos != WORKQ_THREAD_QOS_ABOVEUI);
			/* Account for the fact that BG and MT are coalesced when
			 * calculating best request for cooperative pool
			 */
			assert(_wq_bucket(req->tr_qos) == _wq_bucket(wq->wq_cooperative_queue_best_req_qos));

			struct workq_threadreq_tailq *bucket = &wq->wq_cooperative_queue[_wq_bucket(req->tr_qos)];
			__assert_only workq_threadreq_t head = STAILQ_FIRST(bucket);

			/* Cooperative requests are only ever serviced from the head of
			 * their bucket's FIFO */
			assert(head == req);
			STAILQ_REMOVE_HEAD(bucket, tr_link);

			/*
			 * If the request we're dequeueing is cooperative, then the sched
			 * counts definitely changed.
			 */
			assert(cooperative_sched_count_changed);
		}

		/*
		 * We want to do the cooperative pool refresh after dequeueing a
		 * cooperative thread request if any (to combine both effects into 1
		 * refresh operation)
		 */
		if (cooperative_sched_count_changed) {
			next_highest_request_changed = _wq_cooperative_queue_refresh_best_req_qos(wq);
		}

		if (!workq_threadreq_is_cooperative(req)) {
			/*
			 * All other types of requests are enqueued in priority queues
			 */

			/* A true return from the removal is treated as the pool's
			 * next-highest request having changed */
			if (priority_queue_remove(workq_priority_queue_for_req(wq, req),
			    &req->tr_entry)) {
				next_highest_request_changed |= true;
				if (workq_threadreq_is_nonovercommit(req)) {
					_wq_thactive_refresh_best_constrained_req_qos(wq);
				}
			}
		}
	}

	return next_highest_request_changed;
}
1755*4f1223e8SApple OSS Distributions
1756*4f1223e8SApple OSS Distributions static void
workq_threadreq_destroy(proc_t p,workq_threadreq_t req)1757*4f1223e8SApple OSS Distributions workq_threadreq_destroy(proc_t p, workq_threadreq_t req)
1758*4f1223e8SApple OSS Distributions {
1759*4f1223e8SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_CANCELED;
1760*4f1223e8SApple OSS Distributions if (req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT)) {
1761*4f1223e8SApple OSS Distributions kqueue_threadreq_cancel(p, req);
1762*4f1223e8SApple OSS Distributions } else {
1763*4f1223e8SApple OSS Distributions zfree(workq_zone_threadreq, req);
1764*4f1223e8SApple OSS Distributions }
1765*4f1223e8SApple OSS Distributions }
1766*4f1223e8SApple OSS Distributions
1767*4f1223e8SApple OSS Distributions #pragma mark workqueue thread creation thread calls
1768*4f1223e8SApple OSS Distributions
static inline bool
workq_thread_call_prepost(struct workqueue *wq, uint32_t sched, uint32_t pend,
    uint32_t fail_mask)
{
	uint32_t old_flags, new_flags;

	/*
	 * Atomically transition wq_flags so at most one caller wins the right
	 * to schedule the thread call:
	 * - give up entirely if the workqueue is exiting, the call is already
	 *   scheduled or pended, or any caller-supplied fail bit is set;
	 * - if the process is suspended, only record the intent via `pend`
	 *   (workq_proc_resumed() will schedule it later);
	 * - otherwise mark the call scheduled via `sched`.
	 */
	os_atomic_rmw_loop(&wq->wq_flags, old_flags, new_flags, acquire, {
		if (__improbable(old_flags & (WQ_EXITING | sched | pend | fail_mask))) {
		        os_atomic_rmw_loop_give_up(return false);
		}
		if (__improbable(old_flags & WQ_PROC_SUSPENDED)) {
		        new_flags = old_flags | pend;
		} else {
		        new_flags = old_flags | sched;
		}
	});

	/* True only when we actually scheduled (not merely pended) */
	return (old_flags & WQ_PROC_SUSPENDED) == 0;
}
1788*4f1223e8SApple OSS Distributions
1789*4f1223e8SApple OSS Distributions #define WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART 0x1
1790*4f1223e8SApple OSS Distributions
static bool
workq_schedule_delayed_thread_creation(struct workqueue *wq, int flags)
{
	assert(!preemption_enabled());

	/* Bail if exiting, already scheduled/pended, or an immediate call is in
	 * flight (the fail mask makes immediate calls supersede delayed ones).
	 * If the process is suspended, the request is pended and we get false. */
	if (!workq_thread_call_prepost(wq, WQ_DELAYED_CALL_SCHEDULED,
	    WQ_DELAYED_CALL_PENDED, WQ_IMMEDIATE_CALL_PENDED |
	    WQ_IMMEDIATE_CALL_SCHEDULED)) {
		return false;
	}

	uint64_t now = mach_absolute_time();

	if (flags & WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART) {
		/* do not change the window */
	} else if (now - wq->wq_thread_call_last_run <= wq->wq_timer_interval) {
		/* Firing within the current window: back off by doubling the
		 * interval, clamped to the configured maximum. */
		wq->wq_timer_interval *= 2;
		if (wq->wq_timer_interval > wq_max_timer_interval.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_max_timer_interval.abstime;
		}
	} else if (now - wq->wq_thread_call_last_run > 2 * wq->wq_timer_interval) {
		/* Firing much slower than the window: tighten by halving, clamped
		 * to the stalled-thread detection window. */
		wq->wq_timer_interval /= 2;
		if (wq->wq_timer_interval < wq_stalled_window.abstime) {
			wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
		}
	}

	WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
	    _wq_flags(wq), wq->wq_timer_interval);

	thread_call_t call = wq->wq_delayed_call;
	uintptr_t arg = WQ_DELAYED_CALL_SCHEDULED;
	uint64_t deadline = now + wq->wq_timer_interval;
	/* prepost guarantees we are the only one arming this call */
	if (thread_call_enter1_delayed(call, (void *)arg, deadline)) {
		panic("delayed_call was already enqueued");
	}
	return true;
}
1829*4f1223e8SApple OSS Distributions
1830*4f1223e8SApple OSS Distributions static void
workq_schedule_immediate_thread_creation(struct workqueue * wq)1831*4f1223e8SApple OSS Distributions workq_schedule_immediate_thread_creation(struct workqueue *wq)
1832*4f1223e8SApple OSS Distributions {
1833*4f1223e8SApple OSS Distributions assert(!preemption_enabled());
1834*4f1223e8SApple OSS Distributions
1835*4f1223e8SApple OSS Distributions if (workq_thread_call_prepost(wq, WQ_IMMEDIATE_CALL_SCHEDULED,
1836*4f1223e8SApple OSS Distributions WQ_IMMEDIATE_CALL_PENDED, 0)) {
1837*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_start_add_timer, wq, wq->wq_reqcount,
1838*4f1223e8SApple OSS Distributions _wq_flags(wq), 0);
1839*4f1223e8SApple OSS Distributions
1840*4f1223e8SApple OSS Distributions uintptr_t arg = WQ_IMMEDIATE_CALL_SCHEDULED;
1841*4f1223e8SApple OSS Distributions if (thread_call_enter1(wq->wq_immediate_call, (void *)arg)) {
1842*4f1223e8SApple OSS Distributions panic("immediate_call was already enqueued");
1843*4f1223e8SApple OSS Distributions }
1844*4f1223e8SApple OSS Distributions }
1845*4f1223e8SApple OSS Distributions }
1846*4f1223e8SApple OSS Distributions
1847*4f1223e8SApple OSS Distributions void
workq_proc_suspended(struct proc * p)1848*4f1223e8SApple OSS Distributions workq_proc_suspended(struct proc *p)
1849*4f1223e8SApple OSS Distributions {
1850*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
1851*4f1223e8SApple OSS Distributions
1852*4f1223e8SApple OSS Distributions if (wq) {
1853*4f1223e8SApple OSS Distributions os_atomic_or(&wq->wq_flags, WQ_PROC_SUSPENDED, relaxed);
1854*4f1223e8SApple OSS Distributions }
1855*4f1223e8SApple OSS Distributions }
1856*4f1223e8SApple OSS Distributions
void
workq_proc_resumed(struct proc *p)
{
	struct workqueue *wq = proc_get_wqptr(p);
	uint32_t wq_flags;

	if (!wq) {
		return;
	}

	/* Atomically clear the suspended bit along with both pend bits;
	 * wq_flags holds the pre-clear value so we can tell which thread-call
	 * requests were deferred while the process was suspended. */
	wq_flags = os_atomic_andnot_orig(&wq->wq_flags, WQ_PROC_SUSPENDED |
	    WQ_DELAYED_CALL_PENDED | WQ_IMMEDIATE_CALL_PENDED, relaxed);
	if ((wq_flags & WQ_EXITING) == 0) {
		/* the schedule functions assert preemption is disabled */
		disable_preemption();
		/* Immediate takes precedence over delayed if both were pended */
		if (wq_flags & WQ_IMMEDIATE_CALL_PENDED) {
			workq_schedule_immediate_thread_creation(wq);
		} else if (wq_flags & WQ_DELAYED_CALL_PENDED) {
			workq_schedule_delayed_thread_creation(wq,
			    WORKQ_SCHEDULE_DELAYED_THREAD_CREATION_RESTART);
		}
		enable_preemption();
	}
}
1880*4f1223e8SApple OSS Distributions
1881*4f1223e8SApple OSS Distributions /**
1882*4f1223e8SApple OSS Distributions * returns whether lastblocked_tsp is within wq_stalled_window usecs of now
1883*4f1223e8SApple OSS Distributions */
1884*4f1223e8SApple OSS Distributions static bool
workq_thread_is_busy(uint64_t now,_Atomic uint64_t * lastblocked_tsp)1885*4f1223e8SApple OSS Distributions workq_thread_is_busy(uint64_t now, _Atomic uint64_t *lastblocked_tsp)
1886*4f1223e8SApple OSS Distributions {
1887*4f1223e8SApple OSS Distributions uint64_t lastblocked_ts = os_atomic_load_wide(lastblocked_tsp, relaxed);
1888*4f1223e8SApple OSS Distributions if (now <= lastblocked_ts) {
1889*4f1223e8SApple OSS Distributions /*
1890*4f1223e8SApple OSS Distributions * Because the update of the timestamp when a thread blocks
1891*4f1223e8SApple OSS Distributions * isn't serialized against us looking at it (i.e. we don't hold
1892*4f1223e8SApple OSS Distributions * the workq lock), it's possible to have a timestamp that matches
1893*4f1223e8SApple OSS Distributions * the current time or that even looks to be in the future relative
1894*4f1223e8SApple OSS Distributions * to when we grabbed the current time...
1895*4f1223e8SApple OSS Distributions *
1896*4f1223e8SApple OSS Distributions * Just treat this as a busy thread since it must have just blocked.
1897*4f1223e8SApple OSS Distributions */
1898*4f1223e8SApple OSS Distributions return true;
1899*4f1223e8SApple OSS Distributions }
1900*4f1223e8SApple OSS Distributions return (now - lastblocked_ts) < wq_stalled_window.abstime;
1901*4f1223e8SApple OSS Distributions }
1902*4f1223e8SApple OSS Distributions
1903*4f1223e8SApple OSS Distributions static void
workq_add_new_threads_call(void * _p,void * flags)1904*4f1223e8SApple OSS Distributions workq_add_new_threads_call(void *_p, void *flags)
1905*4f1223e8SApple OSS Distributions {
1906*4f1223e8SApple OSS Distributions proc_t p = _p;
1907*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
1908*4f1223e8SApple OSS Distributions uint32_t my_flag = (uint32_t)(uintptr_t)flags;
1909*4f1223e8SApple OSS Distributions
1910*4f1223e8SApple OSS Distributions /*
1911*4f1223e8SApple OSS Distributions * workq_exit() will set the workqueue to NULL before
1912*4f1223e8SApple OSS Distributions * it cancels thread calls.
1913*4f1223e8SApple OSS Distributions */
1914*4f1223e8SApple OSS Distributions if (!wq) {
1915*4f1223e8SApple OSS Distributions return;
1916*4f1223e8SApple OSS Distributions }
1917*4f1223e8SApple OSS Distributions
1918*4f1223e8SApple OSS Distributions assert((my_flag == WQ_DELAYED_CALL_SCHEDULED) ||
1919*4f1223e8SApple OSS Distributions (my_flag == WQ_IMMEDIATE_CALL_SCHEDULED));
1920*4f1223e8SApple OSS Distributions
1921*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_START, wq, _wq_flags(wq),
1922*4f1223e8SApple OSS Distributions wq->wq_nthreads, wq->wq_thidlecount);
1923*4f1223e8SApple OSS Distributions
1924*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
1925*4f1223e8SApple OSS Distributions
1926*4f1223e8SApple OSS Distributions wq->wq_thread_call_last_run = mach_absolute_time();
1927*4f1223e8SApple OSS Distributions os_atomic_andnot(&wq->wq_flags, my_flag, release);
1928*4f1223e8SApple OSS Distributions
1929*4f1223e8SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
1930*4f1223e8SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
1931*4f1223e8SApple OSS Distributions
1932*4f1223e8SApple OSS Distributions workq_unlock(wq);
1933*4f1223e8SApple OSS Distributions
1934*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_add_timer | DBG_FUNC_END, wq, 0,
1935*4f1223e8SApple OSS Distributions wq->wq_nthreads, wq->wq_thidlecount);
1936*4f1223e8SApple OSS Distributions }
1937*4f1223e8SApple OSS Distributions
1938*4f1223e8SApple OSS Distributions #pragma mark thread state tracking
1939*4f1223e8SApple OSS Distributions
1940*4f1223e8SApple OSS Distributions static void
workq_sched_callback(int type,thread_t thread)1941*4f1223e8SApple OSS Distributions workq_sched_callback(int type, thread_t thread)
1942*4f1223e8SApple OSS Distributions {
1943*4f1223e8SApple OSS Distributions thread_ro_t tro = get_thread_ro(thread);
1944*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
1945*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(tro->tro_proc);
1946*4f1223e8SApple OSS Distributions thread_qos_t req_qos, qos = uth->uu_workq_pri.qos_bucket;
1947*4f1223e8SApple OSS Distributions wq_thactive_t old_thactive;
1948*4f1223e8SApple OSS Distributions bool start_timer = false;
1949*4f1223e8SApple OSS Distributions
1950*4f1223e8SApple OSS Distributions if (qos == WORKQ_THREAD_QOS_MANAGER) {
1951*4f1223e8SApple OSS Distributions return;
1952*4f1223e8SApple OSS Distributions }
1953*4f1223e8SApple OSS Distributions
1954*4f1223e8SApple OSS Distributions switch (type) {
1955*4f1223e8SApple OSS Distributions case SCHED_CALL_BLOCK:
1956*4f1223e8SApple OSS Distributions old_thactive = _wq_thactive_dec(wq, qos);
1957*4f1223e8SApple OSS Distributions req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
1958*4f1223e8SApple OSS Distributions
1959*4f1223e8SApple OSS Distributions /*
1960*4f1223e8SApple OSS Distributions * Remember the timestamp of the last thread that blocked in this
1961*4f1223e8SApple OSS Distributions * bucket, it used used by admission checks to ignore one thread
1962*4f1223e8SApple OSS Distributions * being inactive if this timestamp is recent enough.
1963*4f1223e8SApple OSS Distributions *
1964*4f1223e8SApple OSS Distributions * If we collide with another thread trying to update the
1965*4f1223e8SApple OSS Distributions * last_blocked (really unlikely since another thread would have to
1966*4f1223e8SApple OSS Distributions * get scheduled and then block after we start down this path), it's
1967*4f1223e8SApple OSS Distributions * not a problem. Either timestamp is adequate, so no need to retry
1968*4f1223e8SApple OSS Distributions */
1969*4f1223e8SApple OSS Distributions os_atomic_store_wide(&wq->wq_lastblocked_ts[_wq_bucket(qos)],
1970*4f1223e8SApple OSS Distributions thread_last_run_time(thread), relaxed);
1971*4f1223e8SApple OSS Distributions
1972*4f1223e8SApple OSS Distributions if (req_qos == THREAD_QOS_UNSPECIFIED) {
1973*4f1223e8SApple OSS Distributions /*
1974*4f1223e8SApple OSS Distributions * No pending request at the moment we could unblock, move on.
1975*4f1223e8SApple OSS Distributions */
1976*4f1223e8SApple OSS Distributions } else if (qos < req_qos) {
1977*4f1223e8SApple OSS Distributions /*
1978*4f1223e8SApple OSS Distributions * The blocking thread is at a lower QoS than the highest currently
1979*4f1223e8SApple OSS Distributions * pending constrained request, nothing has to be redriven
1980*4f1223e8SApple OSS Distributions */
1981*4f1223e8SApple OSS Distributions } else {
1982*4f1223e8SApple OSS Distributions uint32_t max_busycount, old_req_count;
1983*4f1223e8SApple OSS Distributions old_req_count = _wq_thactive_aggregate_downto_qos(wq, old_thactive,
1984*4f1223e8SApple OSS Distributions req_qos, NULL, &max_busycount);
1985*4f1223e8SApple OSS Distributions /*
1986*4f1223e8SApple OSS Distributions * If it is possible that may_start_constrained_thread had refused
1987*4f1223e8SApple OSS Distributions * admission due to being over the max concurrency, we may need to
1988*4f1223e8SApple OSS Distributions * spin up a new thread.
1989*4f1223e8SApple OSS Distributions *
1990*4f1223e8SApple OSS Distributions * We take into account the maximum number of busy threads
1991*4f1223e8SApple OSS Distributions * that can affect may_start_constrained_thread as looking at the
1992*4f1223e8SApple OSS Distributions * actual number may_start_constrained_thread will see is racy.
1993*4f1223e8SApple OSS Distributions *
1994*4f1223e8SApple OSS Distributions * IOW at NCPU = 4, for IN (req_qos = 1), if the old req count is
1995*4f1223e8SApple OSS Distributions * between NCPU (4) and NCPU - 2 (2) we need to redrive.
1996*4f1223e8SApple OSS Distributions */
1997*4f1223e8SApple OSS Distributions uint32_t conc = wq_max_parallelism[_wq_bucket(qos)];
1998*4f1223e8SApple OSS Distributions if (old_req_count <= conc && conc <= old_req_count + max_busycount) {
1999*4f1223e8SApple OSS Distributions start_timer = workq_schedule_delayed_thread_creation(wq, 0);
2000*4f1223e8SApple OSS Distributions }
2001*4f1223e8SApple OSS Distributions }
2002*4f1223e8SApple OSS Distributions if (__improbable(kdebug_enable)) {
2003*4f1223e8SApple OSS Distributions __unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
2004*4f1223e8SApple OSS Distributions old_thactive, qos, NULL, NULL);
2005*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_START, wq,
2006*4f1223e8SApple OSS Distributions old - 1, qos | (req_qos << 8),
2007*4f1223e8SApple OSS Distributions wq->wq_reqcount << 1 | start_timer);
2008*4f1223e8SApple OSS Distributions }
2009*4f1223e8SApple OSS Distributions break;
2010*4f1223e8SApple OSS Distributions
2011*4f1223e8SApple OSS Distributions case SCHED_CALL_UNBLOCK:
2012*4f1223e8SApple OSS Distributions /*
2013*4f1223e8SApple OSS Distributions * we cannot take the workqueue_lock here...
2014*4f1223e8SApple OSS Distributions * an UNBLOCK can occur from a timer event which
2015*4f1223e8SApple OSS Distributions * is run from an interrupt context... if the workqueue_lock
2016*4f1223e8SApple OSS Distributions * is already held by this processor, we'll deadlock...
2017*4f1223e8SApple OSS Distributions * the thread lock for the thread being UNBLOCKED
2018*4f1223e8SApple OSS Distributions * is also held
2019*4f1223e8SApple OSS Distributions */
2020*4f1223e8SApple OSS Distributions old_thactive = _wq_thactive_inc(wq, qos);
2021*4f1223e8SApple OSS Distributions if (__improbable(kdebug_enable)) {
2022*4f1223e8SApple OSS Distributions __unused uint32_t old = _wq_thactive_aggregate_downto_qos(wq,
2023*4f1223e8SApple OSS Distributions old_thactive, qos, NULL, NULL);
2024*4f1223e8SApple OSS Distributions req_qos = WQ_THACTIVE_BEST_CONSTRAINED_REQ_QOS(old_thactive);
2025*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_block | DBG_FUNC_END, wq,
2026*4f1223e8SApple OSS Distributions old + 1, qos | (req_qos << 8),
2027*4f1223e8SApple OSS Distributions wq->wq_threads_scheduled);
2028*4f1223e8SApple OSS Distributions }
2029*4f1223e8SApple OSS Distributions break;
2030*4f1223e8SApple OSS Distributions }
2031*4f1223e8SApple OSS Distributions }
2032*4f1223e8SApple OSS Distributions
2033*4f1223e8SApple OSS Distributions #pragma mark workq lifecycle
2034*4f1223e8SApple OSS Distributions
2035*4f1223e8SApple OSS Distributions void
workq_reference(struct workqueue * wq)2036*4f1223e8SApple OSS Distributions workq_reference(struct workqueue *wq)
2037*4f1223e8SApple OSS Distributions {
2038*4f1223e8SApple OSS Distributions os_ref_retain(&wq->wq_refcnt);
2039*4f1223e8SApple OSS Distributions }
2040*4f1223e8SApple OSS Distributions
2041*4f1223e8SApple OSS Distributions static void
workq_deallocate_queue_invoke(mpsc_queue_chain_t e,__assert_only mpsc_daemon_queue_t dq)2042*4f1223e8SApple OSS Distributions workq_deallocate_queue_invoke(mpsc_queue_chain_t e,
2043*4f1223e8SApple OSS Distributions __assert_only mpsc_daemon_queue_t dq)
2044*4f1223e8SApple OSS Distributions {
2045*4f1223e8SApple OSS Distributions struct workqueue *wq;
2046*4f1223e8SApple OSS Distributions struct turnstile *ts;
2047*4f1223e8SApple OSS Distributions
2048*4f1223e8SApple OSS Distributions wq = mpsc_queue_element(e, struct workqueue, wq_destroy_link);
2049*4f1223e8SApple OSS Distributions assert(dq == &workq_deallocate_queue);
2050*4f1223e8SApple OSS Distributions
2051*4f1223e8SApple OSS Distributions turnstile_complete((uintptr_t)wq, &wq->wq_turnstile, &ts, TURNSTILE_WORKQS);
2052*4f1223e8SApple OSS Distributions assert(ts);
2053*4f1223e8SApple OSS Distributions turnstile_cleanup();
2054*4f1223e8SApple OSS Distributions turnstile_deallocate(ts);
2055*4f1223e8SApple OSS Distributions
2056*4f1223e8SApple OSS Distributions lck_ticket_destroy(&wq->wq_lock, &workq_lck_grp);
2057*4f1223e8SApple OSS Distributions zfree(workq_zone_workqueue, wq);
2058*4f1223e8SApple OSS Distributions }
2059*4f1223e8SApple OSS Distributions
2060*4f1223e8SApple OSS Distributions static void
workq_deallocate(struct workqueue * wq)2061*4f1223e8SApple OSS Distributions workq_deallocate(struct workqueue *wq)
2062*4f1223e8SApple OSS Distributions {
2063*4f1223e8SApple OSS Distributions if (os_ref_release_relaxed(&wq->wq_refcnt) == 0) {
2064*4f1223e8SApple OSS Distributions workq_deallocate_queue_invoke(&wq->wq_destroy_link,
2065*4f1223e8SApple OSS Distributions &workq_deallocate_queue);
2066*4f1223e8SApple OSS Distributions }
2067*4f1223e8SApple OSS Distributions }
2068*4f1223e8SApple OSS Distributions
2069*4f1223e8SApple OSS Distributions void
workq_deallocate_safe(struct workqueue * wq)2070*4f1223e8SApple OSS Distributions workq_deallocate_safe(struct workqueue *wq)
2071*4f1223e8SApple OSS Distributions {
2072*4f1223e8SApple OSS Distributions if (__improbable(os_ref_release_relaxed(&wq->wq_refcnt) == 0)) {
2073*4f1223e8SApple OSS Distributions mpsc_daemon_enqueue(&workq_deallocate_queue, &wq->wq_destroy_link,
2074*4f1223e8SApple OSS Distributions MPSC_QUEUE_DISABLE_PREEMPTION);
2075*4f1223e8SApple OSS Distributions }
2076*4f1223e8SApple OSS Distributions }
2077*4f1223e8SApple OSS Distributions
2078*4f1223e8SApple OSS Distributions /**
2079*4f1223e8SApple OSS Distributions * Setup per-process state for the workqueue.
2080*4f1223e8SApple OSS Distributions */
2081*4f1223e8SApple OSS Distributions int
workq_open(struct proc * p,__unused struct workq_open_args * uap,__unused int32_t * retval)2082*4f1223e8SApple OSS Distributions workq_open(struct proc *p, __unused struct workq_open_args *uap,
2083*4f1223e8SApple OSS Distributions __unused int32_t *retval)
2084*4f1223e8SApple OSS Distributions {
2085*4f1223e8SApple OSS Distributions struct workqueue *wq;
2086*4f1223e8SApple OSS Distributions int error = 0;
2087*4f1223e8SApple OSS Distributions
2088*4f1223e8SApple OSS Distributions if ((p->p_lflag & P_LREGISTER) == 0) {
2089*4f1223e8SApple OSS Distributions return EINVAL;
2090*4f1223e8SApple OSS Distributions }
2091*4f1223e8SApple OSS Distributions
2092*4f1223e8SApple OSS Distributions if (wq_init_constrained_limit) {
2093*4f1223e8SApple OSS Distributions uint32_t limit, num_cpus = ml_wait_max_cpus();
2094*4f1223e8SApple OSS Distributions
2095*4f1223e8SApple OSS Distributions /*
2096*4f1223e8SApple OSS Distributions * set up the limit for the constrained pool
2097*4f1223e8SApple OSS Distributions * this is a virtual pool in that we don't
2098*4f1223e8SApple OSS Distributions * maintain it on a separate idle and run list
2099*4f1223e8SApple OSS Distributions */
2100*4f1223e8SApple OSS Distributions limit = num_cpus * WORKQUEUE_CONSTRAINED_FACTOR;
2101*4f1223e8SApple OSS Distributions
2102*4f1223e8SApple OSS Distributions if (limit > wq_max_constrained_threads) {
2103*4f1223e8SApple OSS Distributions wq_max_constrained_threads = limit;
2104*4f1223e8SApple OSS Distributions }
2105*4f1223e8SApple OSS Distributions
2106*4f1223e8SApple OSS Distributions if (wq_max_threads > WQ_THACTIVE_BUCKET_HALF) {
2107*4f1223e8SApple OSS Distributions wq_max_threads = WQ_THACTIVE_BUCKET_HALF;
2108*4f1223e8SApple OSS Distributions }
2109*4f1223e8SApple OSS Distributions if (wq_max_threads > CONFIG_THREAD_MAX - 20) {
2110*4f1223e8SApple OSS Distributions wq_max_threads = CONFIG_THREAD_MAX - 20;
2111*4f1223e8SApple OSS Distributions }
2112*4f1223e8SApple OSS Distributions
2113*4f1223e8SApple OSS Distributions wq_death_max_load = (uint16_t)fls(num_cpus) + 1;
2114*4f1223e8SApple OSS Distributions
2115*4f1223e8SApple OSS Distributions for (thread_qos_t qos = WORKQ_THREAD_QOS_MIN; qos <= WORKQ_THREAD_QOS_MAX; qos++) {
2116*4f1223e8SApple OSS Distributions wq_max_parallelism[_wq_bucket(qos)] =
2117*4f1223e8SApple OSS Distributions qos_max_parallelism(qos, QOS_PARALLELISM_COUNT_LOGICAL);
2118*4f1223e8SApple OSS Distributions }
2119*4f1223e8SApple OSS Distributions
2120*4f1223e8SApple OSS Distributions wq_max_cooperative_threads = num_cpus;
2121*4f1223e8SApple OSS Distributions
2122*4f1223e8SApple OSS Distributions wq_init_constrained_limit = 0;
2123*4f1223e8SApple OSS Distributions }
2124*4f1223e8SApple OSS Distributions
2125*4f1223e8SApple OSS Distributions if (proc_get_wqptr(p) == NULL) {
2126*4f1223e8SApple OSS Distributions if (proc_init_wqptr_or_wait(p) == FALSE) {
2127*4f1223e8SApple OSS Distributions assert(proc_get_wqptr(p) != NULL);
2128*4f1223e8SApple OSS Distributions goto out;
2129*4f1223e8SApple OSS Distributions }
2130*4f1223e8SApple OSS Distributions
2131*4f1223e8SApple OSS Distributions wq = zalloc_flags(workq_zone_workqueue, Z_WAITOK | Z_ZERO);
2132*4f1223e8SApple OSS Distributions
2133*4f1223e8SApple OSS Distributions os_ref_init_count(&wq->wq_refcnt, &workq_refgrp, 1);
2134*4f1223e8SApple OSS Distributions
2135*4f1223e8SApple OSS Distributions // Start the event manager at the priority hinted at by the policy engine
2136*4f1223e8SApple OSS Distributions thread_qos_t mgr_priority_hint = task_get_default_manager_qos(current_task());
2137*4f1223e8SApple OSS Distributions pthread_priority_t pp = _pthread_priority_make_from_thread_qos(mgr_priority_hint, 0, 0);
2138*4f1223e8SApple OSS Distributions wq->wq_event_manager_priority = (uint32_t)pp;
2139*4f1223e8SApple OSS Distributions wq->wq_timer_interval = (uint32_t)wq_stalled_window.abstime;
2140*4f1223e8SApple OSS Distributions wq->wq_proc = p;
2141*4f1223e8SApple OSS Distributions turnstile_prepare((uintptr_t)wq, &wq->wq_turnstile, turnstile_alloc(),
2142*4f1223e8SApple OSS Distributions TURNSTILE_WORKQS);
2143*4f1223e8SApple OSS Distributions
2144*4f1223e8SApple OSS Distributions TAILQ_INIT(&wq->wq_thrunlist);
2145*4f1223e8SApple OSS Distributions TAILQ_INIT(&wq->wq_thnewlist);
2146*4f1223e8SApple OSS Distributions TAILQ_INIT(&wq->wq_thidlelist);
2147*4f1223e8SApple OSS Distributions priority_queue_init(&wq->wq_overcommit_queue);
2148*4f1223e8SApple OSS Distributions priority_queue_init(&wq->wq_constrained_queue);
2149*4f1223e8SApple OSS Distributions priority_queue_init(&wq->wq_special_queue);
2150*4f1223e8SApple OSS Distributions for (int bucket = 0; bucket < WORKQ_NUM_QOS_BUCKETS; bucket++) {
2151*4f1223e8SApple OSS Distributions STAILQ_INIT(&wq->wq_cooperative_queue[bucket]);
2152*4f1223e8SApple OSS Distributions }
2153*4f1223e8SApple OSS Distributions
2154*4f1223e8SApple OSS Distributions /* We are only using the delayed thread call for the constrained pool
2155*4f1223e8SApple OSS Distributions * which can't have work at >= UI QoS and so we can be fine with a
2156*4f1223e8SApple OSS Distributions * UI QoS thread call.
2157*4f1223e8SApple OSS Distributions */
2158*4f1223e8SApple OSS Distributions wq->wq_delayed_call = thread_call_allocate_with_qos(
2159*4f1223e8SApple OSS Distributions workq_add_new_threads_call, p, THREAD_QOS_USER_INTERACTIVE,
2160*4f1223e8SApple OSS Distributions THREAD_CALL_OPTIONS_ONCE);
2161*4f1223e8SApple OSS Distributions wq->wq_immediate_call = thread_call_allocate_with_options(
2162*4f1223e8SApple OSS Distributions workq_add_new_threads_call, p, THREAD_CALL_PRIORITY_KERNEL,
2163*4f1223e8SApple OSS Distributions THREAD_CALL_OPTIONS_ONCE);
2164*4f1223e8SApple OSS Distributions wq->wq_death_call = thread_call_allocate_with_options(
2165*4f1223e8SApple OSS Distributions workq_kill_old_threads_call, wq,
2166*4f1223e8SApple OSS Distributions THREAD_CALL_PRIORITY_USER, THREAD_CALL_OPTIONS_ONCE);
2167*4f1223e8SApple OSS Distributions
2168*4f1223e8SApple OSS Distributions lck_ticket_init(&wq->wq_lock, &workq_lck_grp);
2169*4f1223e8SApple OSS Distributions
2170*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_create | DBG_FUNC_NONE, wq,
2171*4f1223e8SApple OSS Distributions VM_KERNEL_ADDRHIDE(wq), 0, 0);
2172*4f1223e8SApple OSS Distributions proc_set_wqptr(p, wq);
2173*4f1223e8SApple OSS Distributions }
2174*4f1223e8SApple OSS Distributions out:
2175*4f1223e8SApple OSS Distributions
2176*4f1223e8SApple OSS Distributions return error;
2177*4f1223e8SApple OSS Distributions }
2178*4f1223e8SApple OSS Distributions
2179*4f1223e8SApple OSS Distributions /*
2180*4f1223e8SApple OSS Distributions * Routine: workq_mark_exiting
2181*4f1223e8SApple OSS Distributions *
2182*4f1223e8SApple OSS Distributions * Function: Mark the work queue such that new threads will not be added to the
2183*4f1223e8SApple OSS Distributions * work queue after we return.
2184*4f1223e8SApple OSS Distributions *
2185*4f1223e8SApple OSS Distributions * Conditions: Called against the current process.
2186*4f1223e8SApple OSS Distributions */
2187*4f1223e8SApple OSS Distributions void
workq_mark_exiting(struct proc * p)2188*4f1223e8SApple OSS Distributions workq_mark_exiting(struct proc *p)
2189*4f1223e8SApple OSS Distributions {
2190*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2191*4f1223e8SApple OSS Distributions uint32_t wq_flags;
2192*4f1223e8SApple OSS Distributions workq_threadreq_t mgr_req;
2193*4f1223e8SApple OSS Distributions
2194*4f1223e8SApple OSS Distributions if (!wq) {
2195*4f1223e8SApple OSS Distributions return;
2196*4f1223e8SApple OSS Distributions }
2197*4f1223e8SApple OSS Distributions
2198*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_pthread_exit | DBG_FUNC_START, wq, 0, 0, 0);
2199*4f1223e8SApple OSS Distributions
2200*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
2201*4f1223e8SApple OSS Distributions
2202*4f1223e8SApple OSS Distributions wq_flags = os_atomic_or_orig(&wq->wq_flags, WQ_EXITING, relaxed);
2203*4f1223e8SApple OSS Distributions if (__improbable(wq_flags & WQ_EXITING)) {
2204*4f1223e8SApple OSS Distributions panic("workq_mark_exiting called twice");
2205*4f1223e8SApple OSS Distributions }
2206*4f1223e8SApple OSS Distributions
2207*4f1223e8SApple OSS Distributions /*
2208*4f1223e8SApple OSS Distributions * Opportunistically try to cancel thread calls that are likely in flight.
2209*4f1223e8SApple OSS Distributions * workq_exit() will do the proper cleanup.
2210*4f1223e8SApple OSS Distributions */
2211*4f1223e8SApple OSS Distributions if (wq_flags & WQ_IMMEDIATE_CALL_SCHEDULED) {
2212*4f1223e8SApple OSS Distributions thread_call_cancel(wq->wq_immediate_call);
2213*4f1223e8SApple OSS Distributions }
2214*4f1223e8SApple OSS Distributions if (wq_flags & WQ_DELAYED_CALL_SCHEDULED) {
2215*4f1223e8SApple OSS Distributions thread_call_cancel(wq->wq_delayed_call);
2216*4f1223e8SApple OSS Distributions }
2217*4f1223e8SApple OSS Distributions if (wq_flags & WQ_DEATH_CALL_SCHEDULED) {
2218*4f1223e8SApple OSS Distributions thread_call_cancel(wq->wq_death_call);
2219*4f1223e8SApple OSS Distributions }
2220*4f1223e8SApple OSS Distributions
2221*4f1223e8SApple OSS Distributions mgr_req = wq->wq_event_manager_threadreq;
2222*4f1223e8SApple OSS Distributions wq->wq_event_manager_threadreq = NULL;
2223*4f1223e8SApple OSS Distributions wq->wq_reqcount = 0; /* workq_schedule_creator must not look at queues */
2224*4f1223e8SApple OSS Distributions wq->wq_creator = NULL;
2225*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
2226*4f1223e8SApple OSS Distributions
2227*4f1223e8SApple OSS Distributions workq_unlock(wq);
2228*4f1223e8SApple OSS Distributions
2229*4f1223e8SApple OSS Distributions if (mgr_req) {
2230*4f1223e8SApple OSS Distributions kqueue_threadreq_cancel(p, mgr_req);
2231*4f1223e8SApple OSS Distributions }
2232*4f1223e8SApple OSS Distributions /*
2233*4f1223e8SApple OSS Distributions * No one touches the priority queues once WQ_EXITING is set.
2234*4f1223e8SApple OSS Distributions * It is hence safe to do the tear down without holding any lock.
2235*4f1223e8SApple OSS Distributions */
2236*4f1223e8SApple OSS Distributions priority_queue_destroy(&wq->wq_overcommit_queue,
2237*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2238*4f1223e8SApple OSS Distributions workq_threadreq_destroy(p, e);
2239*4f1223e8SApple OSS Distributions });
2240*4f1223e8SApple OSS Distributions priority_queue_destroy(&wq->wq_constrained_queue,
2241*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2242*4f1223e8SApple OSS Distributions workq_threadreq_destroy(p, e);
2243*4f1223e8SApple OSS Distributions });
2244*4f1223e8SApple OSS Distributions priority_queue_destroy(&wq->wq_special_queue,
2245*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry, ^(workq_threadreq_t e){
2246*4f1223e8SApple OSS Distributions workq_threadreq_destroy(p, e);
2247*4f1223e8SApple OSS Distributions });
2248*4f1223e8SApple OSS Distributions
2249*4f1223e8SApple OSS Distributions WQ_TRACE(TRACE_wq_pthread_exit | DBG_FUNC_END, 0, 0, 0, 0);
2250*4f1223e8SApple OSS Distributions }
2251*4f1223e8SApple OSS Distributions
2252*4f1223e8SApple OSS Distributions /*
2253*4f1223e8SApple OSS Distributions * Routine: workq_exit
2254*4f1223e8SApple OSS Distributions *
2255*4f1223e8SApple OSS Distributions * Function: clean up the work queue structure(s) now that there are no threads
2256*4f1223e8SApple OSS Distributions * left running inside the work queue (except possibly current_thread).
2257*4f1223e8SApple OSS Distributions *
2258*4f1223e8SApple OSS Distributions * Conditions: Called by the last thread in the process.
2259*4f1223e8SApple OSS Distributions * Called against current process.
2260*4f1223e8SApple OSS Distributions */
2261*4f1223e8SApple OSS Distributions void
workq_exit(struct proc * p)2262*4f1223e8SApple OSS Distributions workq_exit(struct proc *p)
2263*4f1223e8SApple OSS Distributions {
2264*4f1223e8SApple OSS Distributions struct workqueue *wq;
2265*4f1223e8SApple OSS Distributions struct uthread *uth, *tmp;
2266*4f1223e8SApple OSS Distributions
2267*4f1223e8SApple OSS Distributions wq = os_atomic_xchg(&p->p_wqptr, NULL, relaxed);
2268*4f1223e8SApple OSS Distributions if (wq != NULL) {
2269*4f1223e8SApple OSS Distributions thread_t th = current_thread();
2270*4f1223e8SApple OSS Distributions
2271*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_workqueue_exit | DBG_FUNC_START, wq, 0, 0, 0);
2272*4f1223e8SApple OSS Distributions
2273*4f1223e8SApple OSS Distributions if (thread_get_tag(th) & THREAD_TAG_WORKQUEUE) {
2274*4f1223e8SApple OSS Distributions /*
2275*4f1223e8SApple OSS Distributions * <rdar://problem/40111515> Make sure we will no longer call the
2276*4f1223e8SApple OSS Distributions * sched call, if we ever block this thread, which the cancel_wait
2277*4f1223e8SApple OSS Distributions * below can do.
2278*4f1223e8SApple OSS Distributions */
2279*4f1223e8SApple OSS Distributions thread_sched_call(th, NULL);
2280*4f1223e8SApple OSS Distributions }
2281*4f1223e8SApple OSS Distributions
2282*4f1223e8SApple OSS Distributions /*
2283*4f1223e8SApple OSS Distributions * Thread calls are always scheduled by the proc itself or under the
2284*4f1223e8SApple OSS Distributions * workqueue spinlock if WQ_EXITING is not yet set.
2285*4f1223e8SApple OSS Distributions *
2286*4f1223e8SApple OSS Distributions * Either way, when this runs, the proc has no threads left beside
2287*4f1223e8SApple OSS Distributions * the one running this very code, so we know no thread call can be
2288*4f1223e8SApple OSS Distributions * dispatched anymore.
2289*4f1223e8SApple OSS Distributions */
2290*4f1223e8SApple OSS Distributions thread_call_cancel_wait(wq->wq_delayed_call);
2291*4f1223e8SApple OSS Distributions thread_call_cancel_wait(wq->wq_immediate_call);
2292*4f1223e8SApple OSS Distributions thread_call_cancel_wait(wq->wq_death_call);
2293*4f1223e8SApple OSS Distributions thread_call_free(wq->wq_delayed_call);
2294*4f1223e8SApple OSS Distributions thread_call_free(wq->wq_immediate_call);
2295*4f1223e8SApple OSS Distributions thread_call_free(wq->wq_death_call);
2296*4f1223e8SApple OSS Distributions
2297*4f1223e8SApple OSS Distributions /*
2298*4f1223e8SApple OSS Distributions * Clean up workqueue data structures for threads that exited and
2299*4f1223e8SApple OSS Distributions * didn't get a chance to clean up after themselves.
2300*4f1223e8SApple OSS Distributions *
2301*4f1223e8SApple OSS Distributions * idle/new threads should have been interrupted and died on their own
2302*4f1223e8SApple OSS Distributions */
2303*4f1223e8SApple OSS Distributions TAILQ_FOREACH_SAFE(uth, &wq->wq_thrunlist, uu_workq_entry, tmp) {
2304*4f1223e8SApple OSS Distributions thread_t mth = get_machthread(uth);
2305*4f1223e8SApple OSS Distributions thread_sched_call(mth, NULL);
2306*4f1223e8SApple OSS Distributions thread_deallocate(mth);
2307*4f1223e8SApple OSS Distributions }
2308*4f1223e8SApple OSS Distributions assert(TAILQ_EMPTY(&wq->wq_thnewlist));
2309*4f1223e8SApple OSS Distributions assert(TAILQ_EMPTY(&wq->wq_thidlelist));
2310*4f1223e8SApple OSS Distributions
2311*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_destroy | DBG_FUNC_END, wq,
2312*4f1223e8SApple OSS Distributions VM_KERNEL_ADDRHIDE(wq), 0, 0);
2313*4f1223e8SApple OSS Distributions
2314*4f1223e8SApple OSS Distributions workq_deallocate(wq);
2315*4f1223e8SApple OSS Distributions
2316*4f1223e8SApple OSS Distributions WQ_TRACE(TRACE_wq_workqueue_exit | DBG_FUNC_END, 0, 0, 0, 0);
2317*4f1223e8SApple OSS Distributions }
2318*4f1223e8SApple OSS Distributions }
2319*4f1223e8SApple OSS Distributions
2320*4f1223e8SApple OSS Distributions
2321*4f1223e8SApple OSS Distributions #pragma mark bsd thread control
2322*4f1223e8SApple OSS Distributions
2323*4f1223e8SApple OSS Distributions bool
bsdthread_part_of_cooperative_workqueue(struct uthread * uth)2324*4f1223e8SApple OSS Distributions bsdthread_part_of_cooperative_workqueue(struct uthread *uth)
2325*4f1223e8SApple OSS Distributions {
2326*4f1223e8SApple OSS Distributions return (workq_thread_is_cooperative(uth) || workq_thread_is_nonovercommit(uth)) &&
2327*4f1223e8SApple OSS Distributions (uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER) &&
2328*4f1223e8SApple OSS Distributions (!workq_thread_is_permanently_bound(uth));
2329*4f1223e8SApple OSS Distributions }
2330*4f1223e8SApple OSS Distributions
2331*4f1223e8SApple OSS Distributions static bool
_pthread_priority_to_policy(pthread_priority_t priority,thread_qos_policy_data_t * data)2332*4f1223e8SApple OSS Distributions _pthread_priority_to_policy(pthread_priority_t priority,
2333*4f1223e8SApple OSS Distributions thread_qos_policy_data_t *data)
2334*4f1223e8SApple OSS Distributions {
2335*4f1223e8SApple OSS Distributions data->qos_tier = _pthread_priority_thread_qos(priority);
2336*4f1223e8SApple OSS Distributions data->tier_importance = _pthread_priority_relpri(priority);
2337*4f1223e8SApple OSS Distributions if (data->qos_tier == THREAD_QOS_UNSPECIFIED || data->tier_importance > 0 ||
2338*4f1223e8SApple OSS Distributions data->tier_importance < THREAD_QOS_MIN_TIER_IMPORTANCE) {
2339*4f1223e8SApple OSS Distributions return false;
2340*4f1223e8SApple OSS Distributions }
2341*4f1223e8SApple OSS Distributions return true;
2342*4f1223e8SApple OSS Distributions }
2343*4f1223e8SApple OSS Distributions
/*
 * Implementation of BSDTHREAD_CTL_SET_SELF: applies, to the calling thread,
 * any combination of a kevent unbind, a QoS (and optional QoS override)
 * change, a voucher adoption, and a fixed-priority/timeshare policy change,
 * as selected by `flags`.
 *
 * Each stage records its own error and falls through to the next stage via
 * the qos:/voucher:/fixedpri: labels, so one stage failing does not prevent
 * the later ones from being attempted (except that a QoS failure skips the
 * fixed-priority/timeshare stage).  At done:, errors are reported in stage
 * order, with the QoS+voucher double failure getting the distinct EBADMSG.
 */
static int
bsdthread_set_self(proc_t p, thread_t th, pthread_priority_t priority,
    mach_port_name_t voucher, enum workq_set_self_flags flags)
{
	struct uthread *uth = get_bsdthread_info(th);
	struct workqueue *wq = proc_get_wqptr(p);

	kern_return_t kr;
	/* one error slot per stage; 0 means "stage succeeded or not requested" */
	int unbind_rv = 0, qos_rv = 0, voucher_rv = 0, fixedpri_rv = 0;
	bool is_wq_thread = (thread_get_tag(th) & THREAD_TAG_WORKQUEUE);

	assert(th == current_thread());
	if (flags & WORKQ_SET_SELF_WQ_KEVENT_UNBIND) {
		/* Only workqueue threads can be unbound from a kqueue request */
		if (!is_wq_thread) {
			unbind_rv = EINVAL;
			goto qos;
		}

		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			unbind_rv = EINVAL;
			goto qos;
		}

		workq_threadreq_t kqr = uth->uu_kqr_bound;
		if (kqr == NULL) {
			/* No request bound: nothing to unbind */
			unbind_rv = EALREADY;
			goto qos;
		}

		/* Workloop-servicing threads cannot be unbound this way */
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			unbind_rv = EINVAL;
			goto qos;
		}

		kqueue_threadreq_unbind(p, kqr);
	}

qos:
	if (flags & (WORKQ_SET_SELF_QOS_FLAG | WORKQ_SET_SELF_QOS_OVERRIDE_FLAG)) {
		/* OVERRIDE_FLAG is only valid in combination with QOS_FLAG */
		assert(flags & WORKQ_SET_SELF_QOS_FLAG);

		thread_qos_policy_data_t new_policy;
		thread_qos_t qos_override = THREAD_QOS_UNSPECIFIED;

		if (!_pthread_priority_to_policy(priority, &new_policy)) {
			qos_rv = EINVAL;
			goto voucher;
		}

		if (flags & WORKQ_SET_SELF_QOS_OVERRIDE_FLAG) {
			/*
			 * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is set, we definitely
			 * should have an override QoS in the pthread_priority_t and we should
			 * only come into this path for cooperative thread requests
			 */
			if (!_pthread_priority_has_override_qos(priority) ||
			    !_pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}
			qos_override = _pthread_priority_thread_override_qos(priority);
		} else {
			/*
			 * If the WORKQ_SET_SELF_QOS_OVERRIDE_FLAG is not set, we definitely
			 * should not have an override QoS in the pthread_priority_t
			 */
			if (_pthread_priority_has_override_qos(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}
		}

		if (!is_wq_thread) {
			/*
			 * Threads opted out of QoS can't change QoS
			 */
			if (!thread_has_qos_policy(th)) {
				qos_rv = EPERM;
				goto voucher;
			}
		} else if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER ||
		    uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_ABOVEUI) {
			/*
			 * Workqueue manager threads or threads above UI can't change QoS
			 */
			qos_rv = EINVAL;
			goto voucher;
		} else {
			/*
			 * For workqueue threads, possibly adjust buckets and redrive thread
			 * requests.
			 *
			 * Transitions allowed:
			 *
			 * overcommit --> non-overcommit
			 * overcommit --> overcommit
			 * non-overcommit --> non-overcommit
			 * non-overcommit --> overcommit (to be deprecated later)
			 * cooperative --> cooperative
			 *
			 * All other transitions aren't allowed so reject them.
			 */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_cooperative(uth) && !_pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_cooperative(priority)) {
				qos_rv = EINVAL;
				goto voucher;
			}

			struct uu_workq_policy old_pri, new_pri;
			bool force_run = false;

			if (qos_override) {
				/*
				 * We're in the case of a thread clarifying that it is for eg. not IN
				 * req QoS but rather, UT req QoS with IN override. However, this can
				 * race with a concurrent override happening to the thread via
				 * workq_thread_add_dispatch_override so this needs to be
				 * synchronized with the thread mutex.
				 */
				thread_mtx_lock(th);
			}

			/* thread mutex (if taken) is acquired before the workqueue lock */
			workq_lock_spin(wq);

			old_pri = new_pri = uth->uu_workq_pri;
			new_pri.qos_req = (thread_qos_t)new_policy.qos_tier;

			if (old_pri.qos_override < qos_override) {
				/*
				 * Since this can race with a concurrent override via
				 * workq_thread_add_dispatch_override, only adjust override value if we
				 * are higher - this is a saturating function.
				 *
				 * We should not be changing the final override values, we should simply
				 * be redistributing the current value with a different breakdown of req
				 * vs override QoS - assert to that effect. Therefore, buckets should
				 * not change.
				 */
				new_pri.qos_override = qos_override;
				assert(workq_pri_override(new_pri) == workq_pri_override(old_pri));
				assert(workq_pri_bucket(new_pri) == workq_pri_bucket(old_pri));
			}

			/* Adjust schedule counts for various types of transitions */

			/* overcommit -> non-overcommit */
			if (workq_thread_is_overcommit(uth) && _pthread_priority_is_nonovercommit(priority)) {
				workq_thread_set_type(uth, 0);
				wq->wq_constrained_threads_scheduled++;

				/* non-overcommit -> overcommit */
			} else if (workq_thread_is_nonovercommit(uth) && _pthread_priority_is_overcommit(priority)) {
				workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
				/* leaving the constrained pool may unblock a waiting request */
				force_run = (wq->wq_constrained_threads_scheduled-- == wq_max_constrained_threads);

				/* cooperative -> cooperative */
			} else if (workq_thread_is_cooperative(uth)) {
				_wq_cooperative_queue_scheduled_count_dec(wq, old_pri.qos_req);
				_wq_cooperative_queue_scheduled_count_inc(wq, new_pri.qos_req);

				/* We're changing schedule counts within cooperative pool, we
				 * need to refresh best cooperative QoS logic again */
				force_run = _wq_cooperative_queue_refresh_best_req_qos(wq);
			}

			/*
			 * This will set up an override on the thread if any and will also call
			 * schedule_creator if needed
			 */
			workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, force_run);
			workq_unlock(wq);

			if (qos_override) {
				thread_mtx_unlock(th);
			}

			if (workq_thread_is_overcommit(uth)) {
				thread_disarm_workqueue_quantum(th);
			} else {
				/* If the thread changed QoS buckets, the quantum duration
				 * may have changed too */
				thread_arm_workqueue_quantum(th);
			}
		}

		/* Apply the new QoS policy to the thread itself (wq thread or not) */
		kr = thread_policy_set_internal(th, THREAD_QOS_POLICY,
		    (thread_policy_t)&new_policy, THREAD_QOS_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			qos_rv = EINVAL;
		}
	}

voucher:
	if (flags & WORKQ_SET_SELF_VOUCHER_FLAG) {
		kr = thread_set_voucher_name(voucher);
		if (kr != KERN_SUCCESS) {
			voucher_rv = ENOENT;
			goto fixedpri;
		}
	}

fixedpri:
	/* A failed QoS change makes a scheduling-policy change meaningless */
	if (qos_rv) {
		goto done;
	}
	if (flags & WORKQ_SET_SELF_FIXEDPRIORITY_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 0};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	} else if (flags & WORKQ_SET_SELF_TIMESHARE_FLAG) {
		thread_extended_policy_data_t extpol = {.timeshare = 1};

		if (is_wq_thread) {
			/* Not allowed on workqueue threads */
			fixedpri_rv = ENOTSUP;
			goto done;
		}

		kr = thread_policy_set_internal(th, THREAD_EXTENDED_POLICY,
		    (thread_policy_t)&extpol, THREAD_EXTENDED_POLICY_COUNT);
		if (kr != KERN_SUCCESS) {
			fixedpri_rv = EINVAL;
			goto done;
		}
	}

done:
	if (qos_rv && voucher_rv) {
		/* Both failed, give that a unique error. */
		return EBADMSG;
	}

	if (unbind_rv) {
		return unbind_rv;
	}

	if (qos_rv) {
		return qos_rv;
	}

	if (voucher_rv) {
		return voucher_rv;
	}

	if (fixedpri_rv) {
		return fixedpri_rv;
	}


	return 0;
}
2611*4f1223e8SApple OSS Distributions
2612*4f1223e8SApple OSS Distributions static int
bsdthread_add_explicit_override(proc_t p,mach_port_name_t kport,pthread_priority_t pp,user_addr_t resource)2613*4f1223e8SApple OSS Distributions bsdthread_add_explicit_override(proc_t p, mach_port_name_t kport,
2614*4f1223e8SApple OSS Distributions pthread_priority_t pp, user_addr_t resource)
2615*4f1223e8SApple OSS Distributions {
2616*4f1223e8SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pp);
2617*4f1223e8SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
2618*4f1223e8SApple OSS Distributions return EINVAL;
2619*4f1223e8SApple OSS Distributions }
2620*4f1223e8SApple OSS Distributions
2621*4f1223e8SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2622*4f1223e8SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2623*4f1223e8SApple OSS Distributions if (th == THREAD_NULL) {
2624*4f1223e8SApple OSS Distributions return ESRCH;
2625*4f1223e8SApple OSS Distributions }
2626*4f1223e8SApple OSS Distributions
2627*4f1223e8SApple OSS Distributions int rv = proc_thread_qos_add_override(proc_task(p), th, 0, qos, TRUE,
2628*4f1223e8SApple OSS Distributions resource, THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2629*4f1223e8SApple OSS Distributions
2630*4f1223e8SApple OSS Distributions thread_deallocate(th);
2631*4f1223e8SApple OSS Distributions return rv;
2632*4f1223e8SApple OSS Distributions }
2633*4f1223e8SApple OSS Distributions
2634*4f1223e8SApple OSS Distributions static int
bsdthread_remove_explicit_override(proc_t p,mach_port_name_t kport,user_addr_t resource)2635*4f1223e8SApple OSS Distributions bsdthread_remove_explicit_override(proc_t p, mach_port_name_t kport,
2636*4f1223e8SApple OSS Distributions user_addr_t resource)
2637*4f1223e8SApple OSS Distributions {
2638*4f1223e8SApple OSS Distributions thread_t th = port_name_to_thread(kport,
2639*4f1223e8SApple OSS Distributions PORT_INTRANS_THREAD_IN_CURRENT_TASK);
2640*4f1223e8SApple OSS Distributions if (th == THREAD_NULL) {
2641*4f1223e8SApple OSS Distributions return ESRCH;
2642*4f1223e8SApple OSS Distributions }
2643*4f1223e8SApple OSS Distributions
2644*4f1223e8SApple OSS Distributions int rv = proc_thread_qos_remove_override(proc_task(p), th, 0, resource,
2645*4f1223e8SApple OSS Distributions THREAD_QOS_OVERRIDE_TYPE_PTHREAD_EXPLICIT_OVERRIDE);
2646*4f1223e8SApple OSS Distributions
2647*4f1223e8SApple OSS Distributions thread_deallocate(th);
2648*4f1223e8SApple OSS Distributions return rv;
2649*4f1223e8SApple OSS Distributions }
2650*4f1223e8SApple OSS Distributions
/*
 * BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH: apply a dispatch QoS override to the
 * workqueue thread named by `kport`.
 *
 * When `ulock_addr` is non-zero, the override is only applied if the ulock
 * word still names `kport` as owner; if the copyin itself fails (e.g. the
 * page is not resident), the check is skipped and the override is applied
 * anyway (best effort).  Returns 0 even when the override is skipped.
 */
static int
workq_thread_add_dispatch_override(proc_t p, mach_port_name_t kport,
    pthread_priority_t pp, user_addr_t ulock_addr)
{
	struct uu_workq_policy old_pri, new_pri;
	struct workqueue *wq = proc_get_wqptr(p);

	thread_qos_t qos_override = _pthread_priority_thread_qos(pp);
	if (qos_override == THREAD_QOS_UNSPECIFIED) {
		return EINVAL;
	}

	thread_t thread = port_name_to_thread(kport,
	    PORT_INTRANS_THREAD_IN_CURRENT_TASK);
	if (thread == THREAD_NULL) {
		return ESRCH;
	}

	struct uthread *uth = get_bsdthread_info(thread);
	/* dispatch overrides only make sense on workqueue threads */
	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
		thread_deallocate(thread);
		return EPERM;
	}

	WQ_TRACE_WQ(TRACE_wq_override_dispatch | DBG_FUNC_NONE,
	    wq, thread_tid(thread), 1, pp);

	/*
	 * The thread mutex is held across the ownership check and the override
	 * application; bsdthread_set_self's override-redistribution path takes
	 * the same mutex to serialize against us.
	 */
	thread_mtx_lock(thread);

	if (ulock_addr) {
		uint32_t val;
		int rc;
		/*
		 * Workaround lack of explicit support for 'no-fault copyin'
		 * <rdar://problem/24999882>, as disabling preemption prevents paging in
		 */
		disable_preemption();
		rc = copyin_atomic32(ulock_addr, &val);
		enable_preemption();
		/* only skip the override if we could read the word and the
		 * target is provably no longer the owner */
		if (rc == 0 && ulock_owner_value_to_port_name(val) != kport) {
			goto out;
		}
	}

	workq_lock_spin(wq);

	old_pri = uth->uu_workq_pri;
	if (old_pri.qos_override >= qos_override) {
		/* Nothing to do */
	} else if (thread == current_thread()) {
		/* self-override: safe to re-evaluate our own bucket placement */
		new_pri = old_pri;
		new_pri.qos_override = qos_override;
		workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	} else {
		/* remote thread: raise its override directly */
		uth->uu_workq_pri.qos_override = qos_override;
		if (qos_override > workq_pri_override(old_pri)) {
			thread_set_workq_override(thread, qos_override);
		}
	}

	workq_unlock(wq);

out:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return 0;
}
2718*4f1223e8SApple OSS Distributions
/*
 * BSDTHREAD_CTL_QOS_OVERRIDE_RESET: clear the dispatch QoS override on a
 * workqueue thread (always the calling thread; see bsdthread_ctl) and
 * re-evaluate its bucket placement.
 */
static int
workq_thread_reset_dispatch_override(proc_t p, thread_t thread)
{
	struct uu_workq_policy old_pri, new_pri;
	struct workqueue *wq = proc_get_wqptr(p);
	struct uthread *uth = get_bsdthread_info(thread);

	if ((thread_get_tag(thread) & THREAD_TAG_WORKQUEUE) == 0) {
		return EPERM;
	}

	WQ_TRACE_WQ(TRACE_wq_override_reset | DBG_FUNC_NONE, wq, 0, 0, 0);

	/*
	 * workq_thread_add_dispatch_override takes the thread mutex before doing the
	 * copyin to validate the drainer and apply the override. We need to do the
	 * same here. See rdar://84472518
	 */
	thread_mtx_lock(thread);

	/* clear only the override component; the requested QoS is untouched */
	workq_lock_spin(wq);
	old_pri = new_pri = uth->uu_workq_pri;
	new_pri.qos_override = THREAD_QOS_UNSPECIFIED;
	workq_thread_update_bucket(p, wq, uth, old_pri, new_pri, false);
	workq_unlock(wq);

	thread_mtx_unlock(thread);
	return 0;
}
2748*4f1223e8SApple OSS Distributions
2749*4f1223e8SApple OSS Distributions static int
workq_thread_allow_kill(__unused proc_t p,thread_t thread,bool enable)2750*4f1223e8SApple OSS Distributions workq_thread_allow_kill(__unused proc_t p, thread_t thread, bool enable)
2751*4f1223e8SApple OSS Distributions {
2752*4f1223e8SApple OSS Distributions if (!(thread_get_tag(thread) & THREAD_TAG_WORKQUEUE)) {
2753*4f1223e8SApple OSS Distributions // If the thread isn't a workqueue thread, don't set the
2754*4f1223e8SApple OSS Distributions // kill_allowed bit; however, we still need to return 0
2755*4f1223e8SApple OSS Distributions // instead of an error code since this code is executed
2756*4f1223e8SApple OSS Distributions // on the abort path which needs to not depend on the
2757*4f1223e8SApple OSS Distributions // pthread_t (returning an error depends on pthread_t via
2758*4f1223e8SApple OSS Distributions // cerror_nocancel)
2759*4f1223e8SApple OSS Distributions return 0;
2760*4f1223e8SApple OSS Distributions }
2761*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
2762*4f1223e8SApple OSS Distributions uth->uu_workq_pthread_kill_allowed = enable;
2763*4f1223e8SApple OSS Distributions return 0;
2764*4f1223e8SApple OSS Distributions }
2765*4f1223e8SApple OSS Distributions
2766*4f1223e8SApple OSS Distributions static int
workq_allow_sigmask(proc_t p,sigset_t mask)2767*4f1223e8SApple OSS Distributions workq_allow_sigmask(proc_t p, sigset_t mask)
2768*4f1223e8SApple OSS Distributions {
2769*4f1223e8SApple OSS Distributions if (mask & workq_threadmask) {
2770*4f1223e8SApple OSS Distributions return EINVAL;
2771*4f1223e8SApple OSS Distributions }
2772*4f1223e8SApple OSS Distributions
2773*4f1223e8SApple OSS Distributions proc_lock(p);
2774*4f1223e8SApple OSS Distributions p->p_workq_allow_sigmask |= mask;
2775*4f1223e8SApple OSS Distributions proc_unlock(p);
2776*4f1223e8SApple OSS Distributions
2777*4f1223e8SApple OSS Distributions return 0;
2778*4f1223e8SApple OSS Distributions }
2779*4f1223e8SApple OSS Distributions
2780*4f1223e8SApple OSS Distributions static int
bsdthread_get_max_parallelism(thread_qos_t qos,unsigned long flags,int * retval)2781*4f1223e8SApple OSS Distributions bsdthread_get_max_parallelism(thread_qos_t qos, unsigned long flags,
2782*4f1223e8SApple OSS Distributions int *retval)
2783*4f1223e8SApple OSS Distributions {
2784*4f1223e8SApple OSS Distributions static_assert(QOS_PARALLELISM_COUNT_LOGICAL ==
2785*4f1223e8SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_COUNT_LOGICAL, "logical");
2786*4f1223e8SApple OSS Distributions static_assert(QOS_PARALLELISM_REALTIME ==
2787*4f1223e8SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_REALTIME, "realtime");
2788*4f1223e8SApple OSS Distributions static_assert(QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE ==
2789*4f1223e8SApple OSS Distributions _PTHREAD_QOS_PARALLELISM_CLUSTER_SHARED_RSRC, "cluster shared resource");
2790*4f1223e8SApple OSS Distributions
2791*4f1223e8SApple OSS Distributions if (flags & ~(QOS_PARALLELISM_REALTIME | QOS_PARALLELISM_COUNT_LOGICAL | QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE)) {
2792*4f1223e8SApple OSS Distributions return EINVAL;
2793*4f1223e8SApple OSS Distributions }
2794*4f1223e8SApple OSS Distributions
2795*4f1223e8SApple OSS Distributions /* No units are present */
2796*4f1223e8SApple OSS Distributions if (flags & QOS_PARALLELISM_CLUSTER_SHARED_RESOURCE) {
2797*4f1223e8SApple OSS Distributions return ENOTSUP;
2798*4f1223e8SApple OSS Distributions }
2799*4f1223e8SApple OSS Distributions
2800*4f1223e8SApple OSS Distributions if (flags & QOS_PARALLELISM_REALTIME) {
2801*4f1223e8SApple OSS Distributions if (qos) {
2802*4f1223e8SApple OSS Distributions return EINVAL;
2803*4f1223e8SApple OSS Distributions }
2804*4f1223e8SApple OSS Distributions } else if (qos == THREAD_QOS_UNSPECIFIED || qos >= THREAD_QOS_LAST) {
2805*4f1223e8SApple OSS Distributions return EINVAL;
2806*4f1223e8SApple OSS Distributions }
2807*4f1223e8SApple OSS Distributions
2808*4f1223e8SApple OSS Distributions *retval = qos_max_parallelism(qos, flags);
2809*4f1223e8SApple OSS Distributions return 0;
2810*4f1223e8SApple OSS Distributions }
2811*4f1223e8SApple OSS Distributions
2812*4f1223e8SApple OSS Distributions static int
bsdthread_dispatch_apply_attr(__unused struct proc * p,thread_t thread,unsigned long flags,uint64_t value1,__unused uint64_t value2)2813*4f1223e8SApple OSS Distributions bsdthread_dispatch_apply_attr(__unused struct proc *p, thread_t thread,
2814*4f1223e8SApple OSS Distributions unsigned long flags, uint64_t value1, __unused uint64_t value2)
2815*4f1223e8SApple OSS Distributions {
2816*4f1223e8SApple OSS Distributions uint32_t apply_worker_index;
2817*4f1223e8SApple OSS Distributions kern_return_t kr;
2818*4f1223e8SApple OSS Distributions
2819*4f1223e8SApple OSS Distributions switch (flags) {
2820*4f1223e8SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_SET:
2821*4f1223e8SApple OSS Distributions apply_worker_index = (uint32_t)value1;
2822*4f1223e8SApple OSS Distributions kr = thread_shared_rsrc_policy_set(thread, apply_worker_index, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2823*4f1223e8SApple OSS Distributions /*
2824*4f1223e8SApple OSS Distributions * KERN_INVALID_POLICY indicates that the thread was trying to bind to a
2825*4f1223e8SApple OSS Distributions * cluster which it was not eligible to execute on.
2826*4f1223e8SApple OSS Distributions */
2827*4f1223e8SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : ((kr == KERN_INVALID_POLICY) ? ENOTSUP : EINVAL);
2828*4f1223e8SApple OSS Distributions case _PTHREAD_DISPATCH_APPLY_ATTR_CLUSTER_SHARED_RSRC_CLEAR:
2829*4f1223e8SApple OSS Distributions kr = thread_shared_rsrc_policy_clear(thread, CLUSTER_SHARED_RSRC_TYPE_RR, SHARED_RSRC_POLICY_AGENT_DISPATCH);
2830*4f1223e8SApple OSS Distributions return (kr == KERN_SUCCESS) ? 0 : EINVAL;
2831*4f1223e8SApple OSS Distributions default:
2832*4f1223e8SApple OSS Distributions return EINVAL;
2833*4f1223e8SApple OSS Distributions }
2834*4f1223e8SApple OSS Distributions }
2835*4f1223e8SApple OSS Distributions
/*
 * Reject a syscall when a reserved/unused argument is non-zero.
 * NOTE: expands to a `return EINVAL` from the *enclosing* function (GNU
 * statement expression), so it is only usable inside int-returning handlers
 * such as bsdthread_ctl.
 */
#define ENSURE_UNUSED(arg) \
	({ if ((arg) != 0) { return EINVAL; } })
2838*4f1223e8SApple OSS Distributions
2839*4f1223e8SApple OSS Distributions int
bsdthread_ctl(struct proc * p,struct bsdthread_ctl_args * uap,int * retval)2840*4f1223e8SApple OSS Distributions bsdthread_ctl(struct proc *p, struct bsdthread_ctl_args *uap, int *retval)
2841*4f1223e8SApple OSS Distributions {
2842*4f1223e8SApple OSS Distributions switch (uap->cmd) {
2843*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_START:
2844*4f1223e8SApple OSS Distributions return bsdthread_add_explicit_override(p, (mach_port_name_t)uap->arg1,
2845*4f1223e8SApple OSS Distributions (pthread_priority_t)uap->arg2, uap->arg3);
2846*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_END:
2847*4f1223e8SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2848*4f1223e8SApple OSS Distributions return bsdthread_remove_explicit_override(p, (mach_port_name_t)uap->arg1,
2849*4f1223e8SApple OSS Distributions (user_addr_t)uap->arg2);
2850*4f1223e8SApple OSS Distributions
2851*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_DISPATCH:
2852*4f1223e8SApple OSS Distributions return workq_thread_add_dispatch_override(p, (mach_port_name_t)uap->arg1,
2853*4f1223e8SApple OSS Distributions (pthread_priority_t)uap->arg2, uap->arg3);
2854*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_QOS_OVERRIDE_RESET:
2855*4f1223e8SApple OSS Distributions return workq_thread_reset_dispatch_override(p, current_thread());
2856*4f1223e8SApple OSS Distributions
2857*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_SET_SELF:
2858*4f1223e8SApple OSS Distributions return bsdthread_set_self(p, current_thread(),
2859*4f1223e8SApple OSS Distributions (pthread_priority_t)uap->arg1, (mach_port_name_t)uap->arg2,
2860*4f1223e8SApple OSS Distributions (enum workq_set_self_flags)uap->arg3);
2861*4f1223e8SApple OSS Distributions
2862*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_QOS_MAX_PARALLELISM:
2863*4f1223e8SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2864*4f1223e8SApple OSS Distributions return bsdthread_get_max_parallelism((thread_qos_t)uap->arg1,
2865*4f1223e8SApple OSS Distributions (unsigned long)uap->arg2, retval);
2866*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_WORKQ_ALLOW_KILL:
2867*4f1223e8SApple OSS Distributions ENSURE_UNUSED(uap->arg2);
2868*4f1223e8SApple OSS Distributions ENSURE_UNUSED(uap->arg3);
2869*4f1223e8SApple OSS Distributions return workq_thread_allow_kill(p, current_thread(), (bool)uap->arg1);
2870*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_DISPATCH_APPLY_ATTR:
2871*4f1223e8SApple OSS Distributions return bsdthread_dispatch_apply_attr(p, current_thread(),
2872*4f1223e8SApple OSS Distributions (unsigned long)uap->arg1, (uint64_t)uap->arg2,
2873*4f1223e8SApple OSS Distributions (uint64_t)uap->arg3);
2874*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_WORKQ_ALLOW_SIGMASK:
2875*4f1223e8SApple OSS Distributions return workq_allow_sigmask(p, (int)uap->arg1);
2876*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_SET_QOS:
2877*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_ADD:
2878*4f1223e8SApple OSS Distributions case BSDTHREAD_CTL_QOS_DISPATCH_ASYNCHRONOUS_OVERRIDE_RESET:
2879*4f1223e8SApple OSS Distributions /* no longer supported */
2880*4f1223e8SApple OSS Distributions return ENOTSUP;
2881*4f1223e8SApple OSS Distributions
2882*4f1223e8SApple OSS Distributions default:
2883*4f1223e8SApple OSS Distributions return EINVAL;
2884*4f1223e8SApple OSS Distributions }
2885*4f1223e8SApple OSS Distributions }
2886*4f1223e8SApple OSS Distributions
2887*4f1223e8SApple OSS Distributions #pragma mark workqueue thread manipulation
2888*4f1223e8SApple OSS Distributions
2889*4f1223e8SApple OSS Distributions static void __dead2
2890*4f1223e8SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2891*4f1223e8SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2892*4f1223e8SApple OSS Distributions
2893*4f1223e8SApple OSS Distributions static void __dead2
2894*4f1223e8SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
2895*4f1223e8SApple OSS Distributions struct uthread *uth, uint32_t setup_flags);
2896*4f1223e8SApple OSS Distributions
2897*4f1223e8SApple OSS Distributions static void workq_setup_and_run(proc_t p, struct uthread *uth, int flags) __dead2;
2898*4f1223e8SApple OSS Distributions
2899*4f1223e8SApple OSS Distributions #if KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD
2900*4f1223e8SApple OSS Distributions static inline uint64_t
workq_trace_req_id(workq_threadreq_t req)2901*4f1223e8SApple OSS Distributions workq_trace_req_id(workq_threadreq_t req)
2902*4f1223e8SApple OSS Distributions {
2903*4f1223e8SApple OSS Distributions struct kqworkloop *kqwl;
2904*4f1223e8SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
2905*4f1223e8SApple OSS Distributions kqwl = __container_of(req, struct kqworkloop, kqwl_request);
2906*4f1223e8SApple OSS Distributions return kqwl->kqwl_dynamicid;
2907*4f1223e8SApple OSS Distributions }
2908*4f1223e8SApple OSS Distributions
2909*4f1223e8SApple OSS Distributions return VM_KERNEL_ADDRHIDE(req);
2910*4f1223e8SApple OSS Distributions }
2911*4f1223e8SApple OSS Distributions #endif
2912*4f1223e8SApple OSS Distributions
2913*4f1223e8SApple OSS Distributions /**
2914*4f1223e8SApple OSS Distributions * Entry point for libdispatch to ask for threads
2915*4f1223e8SApple OSS Distributions */
2916*4f1223e8SApple OSS Distributions static int
workq_reqthreads(struct proc * p,uint32_t reqcount,pthread_priority_t pp,bool cooperative)2917*4f1223e8SApple OSS Distributions workq_reqthreads(struct proc *p, uint32_t reqcount, pthread_priority_t pp, bool cooperative)
2918*4f1223e8SApple OSS Distributions {
2919*4f1223e8SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pp);
2920*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
2921*4f1223e8SApple OSS Distributions uint32_t unpaced, upcall_flags = WQ_FLAG_THREAD_NEWSPI;
2922*4f1223e8SApple OSS Distributions int ret = 0;
2923*4f1223e8SApple OSS Distributions
2924*4f1223e8SApple OSS Distributions if (wq == NULL || reqcount <= 0 || reqcount > UINT16_MAX ||
2925*4f1223e8SApple OSS Distributions qos == THREAD_QOS_UNSPECIFIED) {
2926*4f1223e8SApple OSS Distributions ret = EINVAL;
2927*4f1223e8SApple OSS Distributions goto exit;
2928*4f1223e8SApple OSS Distributions }
2929*4f1223e8SApple OSS Distributions
2930*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_wqops_reqthreads | DBG_FUNC_NONE,
2931*4f1223e8SApple OSS Distributions wq, reqcount, pp, cooperative);
2932*4f1223e8SApple OSS Distributions
2933*4f1223e8SApple OSS Distributions workq_threadreq_t req = zalloc(workq_zone_threadreq);
2934*4f1223e8SApple OSS Distributions priority_queue_entry_init(&req->tr_entry);
2935*4f1223e8SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_NEW;
2936*4f1223e8SApple OSS Distributions req->tr_qos = qos;
2937*4f1223e8SApple OSS Distributions workq_tr_flags_t tr_flags = 0;
2938*4f1223e8SApple OSS Distributions
2939*4f1223e8SApple OSS Distributions if (pp & _PTHREAD_PRIORITY_OVERCOMMIT_FLAG) {
2940*4f1223e8SApple OSS Distributions tr_flags |= WORKQ_TR_FLAG_OVERCOMMIT;
2941*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
2942*4f1223e8SApple OSS Distributions }
2943*4f1223e8SApple OSS Distributions
2944*4f1223e8SApple OSS Distributions if (cooperative) {
2945*4f1223e8SApple OSS Distributions tr_flags |= WORKQ_TR_FLAG_COOPERATIVE;
2946*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
2947*4f1223e8SApple OSS Distributions
2948*4f1223e8SApple OSS Distributions if (reqcount > 1) {
2949*4f1223e8SApple OSS Distributions ret = ENOTSUP;
2950*4f1223e8SApple OSS Distributions goto free_and_exit;
2951*4f1223e8SApple OSS Distributions }
2952*4f1223e8SApple OSS Distributions }
2953*4f1223e8SApple OSS Distributions
2954*4f1223e8SApple OSS Distributions /* A thread request cannot be both overcommit and cooperative */
2955*4f1223e8SApple OSS Distributions if (workq_tr_is_cooperative(tr_flags) &&
2956*4f1223e8SApple OSS Distributions workq_tr_is_overcommit(tr_flags)) {
2957*4f1223e8SApple OSS Distributions ret = EINVAL;
2958*4f1223e8SApple OSS Distributions goto free_and_exit;
2959*4f1223e8SApple OSS Distributions }
2960*4f1223e8SApple OSS Distributions req->tr_flags = tr_flags;
2961*4f1223e8SApple OSS Distributions
2962*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE,
2963*4f1223e8SApple OSS Distributions wq, workq_trace_req_id(req), req->tr_qos, reqcount);
2964*4f1223e8SApple OSS Distributions
2965*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
2966*4f1223e8SApple OSS Distributions do {
2967*4f1223e8SApple OSS Distributions if (_wq_exiting(wq)) {
2968*4f1223e8SApple OSS Distributions goto unlock_and_exit;
2969*4f1223e8SApple OSS Distributions }
2970*4f1223e8SApple OSS Distributions
2971*4f1223e8SApple OSS Distributions /*
2972*4f1223e8SApple OSS Distributions * When userspace is asking for parallelism, wakeup up to (reqcount - 1)
2973*4f1223e8SApple OSS Distributions * threads without pacing, to inform the scheduler of that workload.
2974*4f1223e8SApple OSS Distributions *
2975*4f1223e8SApple OSS Distributions * The last requests, or the ones that failed the admission checks are
2976*4f1223e8SApple OSS Distributions * enqueued and go through the regular creator codepath.
2977*4f1223e8SApple OSS Distributions *
2978*4f1223e8SApple OSS Distributions * If there aren't enough threads, add one, but re-evaluate everything
2979*4f1223e8SApple OSS Distributions * as conditions may now have changed.
2980*4f1223e8SApple OSS Distributions */
2981*4f1223e8SApple OSS Distributions unpaced = reqcount - 1;
2982*4f1223e8SApple OSS Distributions
2983*4f1223e8SApple OSS Distributions if (reqcount > 1) {
2984*4f1223e8SApple OSS Distributions /* We don't handle asking for parallelism on the cooperative
2985*4f1223e8SApple OSS Distributions * workqueue just yet */
2986*4f1223e8SApple OSS Distributions assert(!workq_threadreq_is_cooperative(req));
2987*4f1223e8SApple OSS Distributions
2988*4f1223e8SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
2989*4f1223e8SApple OSS Distributions unpaced = workq_constrained_allowance(wq, qos, NULL, false, true);
2990*4f1223e8SApple OSS Distributions if (unpaced >= reqcount - 1) {
2991*4f1223e8SApple OSS Distributions unpaced = reqcount - 1;
2992*4f1223e8SApple OSS Distributions }
2993*4f1223e8SApple OSS Distributions }
2994*4f1223e8SApple OSS Distributions }
2995*4f1223e8SApple OSS Distributions
2996*4f1223e8SApple OSS Distributions /*
2997*4f1223e8SApple OSS Distributions * This path does not currently handle custom workloop parameters
2998*4f1223e8SApple OSS Distributions * when creating threads for parallelism.
2999*4f1223e8SApple OSS Distributions */
3000*4f1223e8SApple OSS Distributions assert(!(req->tr_flags & WORKQ_TR_FLAG_WL_PARAMS));
3001*4f1223e8SApple OSS Distributions
3002*4f1223e8SApple OSS Distributions /*
3003*4f1223e8SApple OSS Distributions * This is a trimmed down version of workq_threadreq_bind_and_unlock()
3004*4f1223e8SApple OSS Distributions */
3005*4f1223e8SApple OSS Distributions while (unpaced > 0 && wq->wq_thidlecount) {
3006*4f1223e8SApple OSS Distributions struct uthread *uth;
3007*4f1223e8SApple OSS Distributions bool needs_wakeup;
3008*4f1223e8SApple OSS Distributions uint8_t uu_flags = UT_WORKQ_EARLY_BOUND;
3009*4f1223e8SApple OSS Distributions
3010*4f1223e8SApple OSS Distributions if (workq_tr_is_overcommit(req->tr_flags)) {
3011*4f1223e8SApple OSS Distributions uu_flags |= UT_WORKQ_OVERCOMMIT;
3012*4f1223e8SApple OSS Distributions }
3013*4f1223e8SApple OSS Distributions
3014*4f1223e8SApple OSS Distributions uth = workq_pop_idle_thread(wq, uu_flags, &needs_wakeup);
3015*4f1223e8SApple OSS Distributions
3016*4f1223e8SApple OSS Distributions _wq_thactive_inc(wq, qos);
3017*4f1223e8SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(qos)]++;
3018*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
3019*4f1223e8SApple OSS Distributions wq->wq_fulfilled++;
3020*4f1223e8SApple OSS Distributions
3021*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
3022*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.thread_request = req;
3023*4f1223e8SApple OSS Distributions if (needs_wakeup) {
3024*4f1223e8SApple OSS Distributions workq_thread_wakeup(uth);
3025*4f1223e8SApple OSS Distributions }
3026*4f1223e8SApple OSS Distributions unpaced--;
3027*4f1223e8SApple OSS Distributions reqcount--;
3028*4f1223e8SApple OSS Distributions }
3029*4f1223e8SApple OSS Distributions } while (unpaced && wq->wq_nthreads < wq_max_threads &&
3030*4f1223e8SApple OSS Distributions (workq_add_new_idle_thread(p, wq, workq_unpark_continue,
3031*4f1223e8SApple OSS Distributions false, NULL) == KERN_SUCCESS));
3032*4f1223e8SApple OSS Distributions
3033*4f1223e8SApple OSS Distributions if (_wq_exiting(wq)) {
3034*4f1223e8SApple OSS Distributions goto unlock_and_exit;
3035*4f1223e8SApple OSS Distributions }
3036*4f1223e8SApple OSS Distributions
3037*4f1223e8SApple OSS Distributions req->tr_count = (uint16_t)reqcount;
3038*4f1223e8SApple OSS Distributions if (workq_threadreq_enqueue(wq, req)) {
3039*4f1223e8SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
3040*4f1223e8SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
3041*4f1223e8SApple OSS Distributions }
3042*4f1223e8SApple OSS Distributions workq_unlock(wq);
3043*4f1223e8SApple OSS Distributions return 0;
3044*4f1223e8SApple OSS Distributions
3045*4f1223e8SApple OSS Distributions unlock_and_exit:
3046*4f1223e8SApple OSS Distributions workq_unlock(wq);
3047*4f1223e8SApple OSS Distributions free_and_exit:
3048*4f1223e8SApple OSS Distributions zfree(workq_zone_threadreq, req);
3049*4f1223e8SApple OSS Distributions exit:
3050*4f1223e8SApple OSS Distributions return ret;
3051*4f1223e8SApple OSS Distributions }
3052*4f1223e8SApple OSS Distributions
3053*4f1223e8SApple OSS Distributions bool
workq_kern_threadreq_initiate(struct proc * p,workq_threadreq_t req,struct turnstile * workloop_ts,thread_qos_t qos,workq_kern_threadreq_flags_t flags)3054*4f1223e8SApple OSS Distributions workq_kern_threadreq_initiate(struct proc *p, workq_threadreq_t req,
3055*4f1223e8SApple OSS Distributions struct turnstile *workloop_ts, thread_qos_t qos,
3056*4f1223e8SApple OSS Distributions workq_kern_threadreq_flags_t flags)
3057*4f1223e8SApple OSS Distributions {
3058*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3059*4f1223e8SApple OSS Distributions struct uthread *uth = NULL;
3060*4f1223e8SApple OSS Distributions
3061*4f1223e8SApple OSS Distributions assert(req->tr_flags & (WORKQ_TR_FLAG_WORKLOOP | WORKQ_TR_FLAG_KEVENT));
3062*4f1223e8SApple OSS Distributions
3063*4f1223e8SApple OSS Distributions /*
3064*4f1223e8SApple OSS Distributions * For any new initialization changes done to workqueue thread request below,
3065*4f1223e8SApple OSS Distributions * please also consider if they are relevant to permanently bound thread
3066*4f1223e8SApple OSS Distributions * request. See workq_kern_threadreq_permanent_bind.
3067*4f1223e8SApple OSS Distributions */
3068*4f1223e8SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3069*4f1223e8SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(req);
3070*4f1223e8SApple OSS Distributions qos = thread_workq_qos_for_pri(trp.trp_pri);
3071*4f1223e8SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3072*4f1223e8SApple OSS Distributions qos = WORKQ_THREAD_QOS_ABOVEUI;
3073*4f1223e8SApple OSS Distributions }
3074*4f1223e8SApple OSS Distributions }
3075*4f1223e8SApple OSS Distributions
3076*4f1223e8SApple OSS Distributions assert(req->tr_state == WORKQ_TR_STATE_IDLE);
3077*4f1223e8SApple OSS Distributions priority_queue_entry_init(&req->tr_entry);
3078*4f1223e8SApple OSS Distributions req->tr_count = 1;
3079*4f1223e8SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_NEW;
3080*4f1223e8SApple OSS Distributions req->tr_qos = qos;
3081*4f1223e8SApple OSS Distributions
3082*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_request_initiate | DBG_FUNC_NONE, wq,
3083*4f1223e8SApple OSS Distributions workq_trace_req_id(req), qos, 1);
3084*4f1223e8SApple OSS Distributions
3085*4f1223e8SApple OSS Distributions if (flags & WORKQ_THREADREQ_ATTEMPT_REBIND) {
3086*4f1223e8SApple OSS Distributions /*
3087*4f1223e8SApple OSS Distributions * we're called back synchronously from the context of
3088*4f1223e8SApple OSS Distributions * kqueue_threadreq_unbind from within workq_thread_return()
3089*4f1223e8SApple OSS Distributions * we can try to match up this thread with this request !
3090*4f1223e8SApple OSS Distributions */
3091*4f1223e8SApple OSS Distributions uth = current_uthread();
3092*4f1223e8SApple OSS Distributions assert(uth->uu_kqr_bound == NULL);
3093*4f1223e8SApple OSS Distributions }
3094*4f1223e8SApple OSS Distributions
3095*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
3096*4f1223e8SApple OSS Distributions if (_wq_exiting(wq)) {
3097*4f1223e8SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_IDLE;
3098*4f1223e8SApple OSS Distributions workq_unlock(wq);
3099*4f1223e8SApple OSS Distributions return false;
3100*4f1223e8SApple OSS Distributions }
3101*4f1223e8SApple OSS Distributions
3102*4f1223e8SApple OSS Distributions if (uth && workq_threadreq_admissible(wq, uth, req)) {
3103*4f1223e8SApple OSS Distributions /* This is the case of the rebind - we were about to park and unbind
3104*4f1223e8SApple OSS Distributions * when more events came so keep the binding.
3105*4f1223e8SApple OSS Distributions */
3106*4f1223e8SApple OSS Distributions assert(uth != wq->wq_creator);
3107*4f1223e8SApple OSS Distributions
3108*4f1223e8SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket != req->tr_qos) {
3109*4f1223e8SApple OSS Distributions _wq_thactive_move(wq, uth->uu_workq_pri.qos_bucket, req->tr_qos);
3110*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ false);
3111*4f1223e8SApple OSS Distributions }
3112*4f1223e8SApple OSS Distributions /*
3113*4f1223e8SApple OSS Distributions * We're called from workq_kern_threadreq_initiate()
3114*4f1223e8SApple OSS Distributions * due to an unbind, with the kq req held.
3115*4f1223e8SApple OSS Distributions */
3116*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
3117*4f1223e8SApple OSS Distributions workq_trace_req_id(req), req->tr_flags, 0);
3118*4f1223e8SApple OSS Distributions wq->wq_fulfilled++;
3119*4f1223e8SApple OSS Distributions
3120*4f1223e8SApple OSS Distributions kqueue_threadreq_bind(p, req, get_machthread(uth), 0);
3121*4f1223e8SApple OSS Distributions } else {
3122*4f1223e8SApple OSS Distributions if (workloop_ts) {
3123*4f1223e8SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
3124*4f1223e8SApple OSS Distributions turnstile_update_inheritor(workloop_ts, wq->wq_turnstile,
3125*4f1223e8SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
3126*4f1223e8SApple OSS Distributions turnstile_update_inheritor_complete(workloop_ts,
3127*4f1223e8SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
3128*4f1223e8SApple OSS Distributions });
3129*4f1223e8SApple OSS Distributions }
3130*4f1223e8SApple OSS Distributions
3131*4f1223e8SApple OSS Distributions bool reevaluate_creator_thread_group = false;
3132*4f1223e8SApple OSS Distributions #if CONFIG_PREADOPT_TG
3133*4f1223e8SApple OSS Distributions reevaluate_creator_thread_group = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
3134*4f1223e8SApple OSS Distributions #endif
3135*4f1223e8SApple OSS Distributions /* We enqueued the highest priority item or we may need to reevaluate if
3136*4f1223e8SApple OSS Distributions * the creator needs a thread group pre-adoption */
3137*4f1223e8SApple OSS Distributions if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_thread_group) {
3138*4f1223e8SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3139*4f1223e8SApple OSS Distributions }
3140*4f1223e8SApple OSS Distributions }
3141*4f1223e8SApple OSS Distributions
3142*4f1223e8SApple OSS Distributions workq_unlock(wq);
3143*4f1223e8SApple OSS Distributions
3144*4f1223e8SApple OSS Distributions return true;
3145*4f1223e8SApple OSS Distributions }
3146*4f1223e8SApple OSS Distributions
3147*4f1223e8SApple OSS Distributions void
workq_kern_threadreq_modify(struct proc * p,workq_threadreq_t req,thread_qos_t qos,workq_kern_threadreq_flags_t flags)3148*4f1223e8SApple OSS Distributions workq_kern_threadreq_modify(struct proc *p, workq_threadreq_t req,
3149*4f1223e8SApple OSS Distributions thread_qos_t qos, workq_kern_threadreq_flags_t flags)
3150*4f1223e8SApple OSS Distributions {
3151*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3152*4f1223e8SApple OSS Distributions bool make_overcommit = false;
3153*4f1223e8SApple OSS Distributions
3154*4f1223e8SApple OSS Distributions if (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
3155*4f1223e8SApple OSS Distributions /* Requests outside-of-QoS shouldn't accept modify operations */
3156*4f1223e8SApple OSS Distributions return;
3157*4f1223e8SApple OSS Distributions }
3158*4f1223e8SApple OSS Distributions
3159*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
3160*4f1223e8SApple OSS Distributions
3161*4f1223e8SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3162*4f1223e8SApple OSS Distributions assert(req->tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP));
3163*4f1223e8SApple OSS Distributions
3164*4f1223e8SApple OSS Distributions if (req->tr_state == WORKQ_TR_STATE_BINDING) {
3165*4f1223e8SApple OSS Distributions kqueue_threadreq_bind(p, req, req->tr_thread, 0);
3166*4f1223e8SApple OSS Distributions workq_unlock(wq);
3167*4f1223e8SApple OSS Distributions return;
3168*4f1223e8SApple OSS Distributions }
3169*4f1223e8SApple OSS Distributions
3170*4f1223e8SApple OSS Distributions if (flags & WORKQ_THREADREQ_MAKE_OVERCOMMIT) {
3171*4f1223e8SApple OSS Distributions /* TODO (rokhinip): We come into this code path for kqwl thread
3172*4f1223e8SApple OSS Distributions * requests. kqwl requests cannot be cooperative.
3173*4f1223e8SApple OSS Distributions */
3174*4f1223e8SApple OSS Distributions assert(!workq_threadreq_is_cooperative(req));
3175*4f1223e8SApple OSS Distributions
3176*4f1223e8SApple OSS Distributions make_overcommit = workq_threadreq_is_nonovercommit(req);
3177*4f1223e8SApple OSS Distributions }
3178*4f1223e8SApple OSS Distributions
3179*4f1223e8SApple OSS Distributions if (_wq_exiting(wq) || (req->tr_qos == qos && !make_overcommit)) {
3180*4f1223e8SApple OSS Distributions workq_unlock(wq);
3181*4f1223e8SApple OSS Distributions return;
3182*4f1223e8SApple OSS Distributions }
3183*4f1223e8SApple OSS Distributions
3184*4f1223e8SApple OSS Distributions assert(req->tr_count == 1);
3185*4f1223e8SApple OSS Distributions if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
3186*4f1223e8SApple OSS Distributions panic("Invalid thread request (%p) state %d", req, req->tr_state);
3187*4f1223e8SApple OSS Distributions }
3188*4f1223e8SApple OSS Distributions
3189*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_request_modify | DBG_FUNC_NONE, wq,
3190*4f1223e8SApple OSS Distributions workq_trace_req_id(req), qos, 0);
3191*4f1223e8SApple OSS Distributions
3192*4f1223e8SApple OSS Distributions struct priority_queue_sched_max *pq = workq_priority_queue_for_req(wq, req);
3193*4f1223e8SApple OSS Distributions workq_threadreq_t req_max;
3194*4f1223e8SApple OSS Distributions
3195*4f1223e8SApple OSS Distributions /*
3196*4f1223e8SApple OSS Distributions * Stage 1: Dequeue the request from its priority queue.
3197*4f1223e8SApple OSS Distributions *
3198*4f1223e8SApple OSS Distributions * If we dequeue the root item of the constrained priority queue,
3199*4f1223e8SApple OSS Distributions * maintain the best constrained request qos invariant.
3200*4f1223e8SApple OSS Distributions */
3201*4f1223e8SApple OSS Distributions if (priority_queue_remove(pq, &req->tr_entry)) {
3202*4f1223e8SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
3203*4f1223e8SApple OSS Distributions _wq_thactive_refresh_best_constrained_req_qos(wq);
3204*4f1223e8SApple OSS Distributions }
3205*4f1223e8SApple OSS Distributions }
3206*4f1223e8SApple OSS Distributions
3207*4f1223e8SApple OSS Distributions /*
3208*4f1223e8SApple OSS Distributions * Stage 2: Apply changes to the thread request
3209*4f1223e8SApple OSS Distributions *
3210*4f1223e8SApple OSS Distributions * If the item will not become the root of the priority queue it belongs to,
3211*4f1223e8SApple OSS Distributions * then we need to wait in line, just enqueue and return quickly.
3212*4f1223e8SApple OSS Distributions */
3213*4f1223e8SApple OSS Distributions if (__improbable(make_overcommit)) {
3214*4f1223e8SApple OSS Distributions req->tr_flags ^= WORKQ_TR_FLAG_OVERCOMMIT;
3215*4f1223e8SApple OSS Distributions pq = workq_priority_queue_for_req(wq, req);
3216*4f1223e8SApple OSS Distributions }
3217*4f1223e8SApple OSS Distributions req->tr_qos = qos;
3218*4f1223e8SApple OSS Distributions
3219*4f1223e8SApple OSS Distributions req_max = priority_queue_max(pq, struct workq_threadreq_s, tr_entry);
3220*4f1223e8SApple OSS Distributions if (req_max && req_max->tr_qos >= qos) {
3221*4f1223e8SApple OSS Distributions priority_queue_entry_set_sched_pri(pq, &req->tr_entry,
3222*4f1223e8SApple OSS Distributions workq_priority_for_req(req), false);
3223*4f1223e8SApple OSS Distributions priority_queue_insert(pq, &req->tr_entry);
3224*4f1223e8SApple OSS Distributions workq_unlock(wq);
3225*4f1223e8SApple OSS Distributions return;
3226*4f1223e8SApple OSS Distributions }
3227*4f1223e8SApple OSS Distributions
3228*4f1223e8SApple OSS Distributions /*
3229*4f1223e8SApple OSS Distributions * Stage 3: Reevaluate whether we should run the thread request.
3230*4f1223e8SApple OSS Distributions *
3231*4f1223e8SApple OSS Distributions * Pretend the thread request is new again:
3232*4f1223e8SApple OSS Distributions * - adjust wq_reqcount to not count it anymore.
3233*4f1223e8SApple OSS Distributions * - make its state WORKQ_TR_STATE_NEW (so that workq_threadreq_bind_and_unlock
3234*4f1223e8SApple OSS Distributions * properly attempts a synchronous bind)
3235*4f1223e8SApple OSS Distributions */
3236*4f1223e8SApple OSS Distributions wq->wq_reqcount--;
3237*4f1223e8SApple OSS Distributions req->tr_state = WORKQ_TR_STATE_NEW;
3238*4f1223e8SApple OSS Distributions
3239*4f1223e8SApple OSS Distributions /* We enqueued the highest priority item or we may need to reevaluate if
3240*4f1223e8SApple OSS Distributions * the creator needs a thread group pre-adoption if the request got a new TG */
3241*4f1223e8SApple OSS Distributions bool reevaluate_creator_tg = false;
3242*4f1223e8SApple OSS Distributions
3243*4f1223e8SApple OSS Distributions #if CONFIG_PREADOPT_TG
3244*4f1223e8SApple OSS Distributions reevaluate_creator_tg = (flags & WORKQ_THREADREQ_REEVALUATE_PREADOPT_TG);
3245*4f1223e8SApple OSS Distributions #endif
3246*4f1223e8SApple OSS Distributions
3247*4f1223e8SApple OSS Distributions if (workq_threadreq_enqueue(wq, req) || reevaluate_creator_tg) {
3248*4f1223e8SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3249*4f1223e8SApple OSS Distributions }
3250*4f1223e8SApple OSS Distributions workq_unlock(wq);
3251*4f1223e8SApple OSS Distributions }
3252*4f1223e8SApple OSS Distributions
3253*4f1223e8SApple OSS Distributions void
workq_kern_bound_thread_reset_pri(workq_threadreq_t req,struct uthread * uth)3254*4f1223e8SApple OSS Distributions workq_kern_bound_thread_reset_pri(workq_threadreq_t req, struct uthread *uth)
3255*4f1223e8SApple OSS Distributions {
3256*4f1223e8SApple OSS Distributions assert(workq_thread_is_permanently_bound(uth));
3257*4f1223e8SApple OSS Distributions
3258*4f1223e8SApple OSS Distributions if (req && (req->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS)) {
3259*4f1223e8SApple OSS Distributions /*
3260*4f1223e8SApple OSS Distributions * For requests outside-of-QoS, we set the scheduling policy and
3261*4f1223e8SApple OSS Distributions * absolute priority for the bound thread right at the initialization
3262*4f1223e8SApple OSS Distributions * time. See workq_kern_threadreq_permanent_bind.
3263*4f1223e8SApple OSS Distributions */
3264*4f1223e8SApple OSS Distributions return;
3265*4f1223e8SApple OSS Distributions }
3266*4f1223e8SApple OSS Distributions
3267*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(current_proc());
3268*4f1223e8SApple OSS Distributions if (req) {
3269*4f1223e8SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3270*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
3271*4f1223e8SApple OSS Distributions } else {
3272*4f1223e8SApple OSS Distributions thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
3273*4f1223e8SApple OSS Distributions if (qos > WORKQ_THREAD_QOS_CLEANUP) {
3274*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, NULL, /*unpark*/ true);
3275*4f1223e8SApple OSS Distributions } else {
3276*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.qos = qos;
3277*4f1223e8SApple OSS Distributions }
3278*4f1223e8SApple OSS Distributions }
3279*4f1223e8SApple OSS Distributions }
3280*4f1223e8SApple OSS Distributions
3281*4f1223e8SApple OSS Distributions void
workq_kern_threadreq_lock(struct proc * p)3282*4f1223e8SApple OSS Distributions workq_kern_threadreq_lock(struct proc *p)
3283*4f1223e8SApple OSS Distributions {
3284*4f1223e8SApple OSS Distributions workq_lock_spin(proc_get_wqptr_fast(p));
3285*4f1223e8SApple OSS Distributions }
3286*4f1223e8SApple OSS Distributions
3287*4f1223e8SApple OSS Distributions void
workq_kern_threadreq_unlock(struct proc * p)3288*4f1223e8SApple OSS Distributions workq_kern_threadreq_unlock(struct proc *p)
3289*4f1223e8SApple OSS Distributions {
3290*4f1223e8SApple OSS Distributions workq_unlock(proc_get_wqptr_fast(p));
3291*4f1223e8SApple OSS Distributions }
3292*4f1223e8SApple OSS Distributions
3293*4f1223e8SApple OSS Distributions void
workq_kern_threadreq_update_inheritor(struct proc * p,workq_threadreq_t req,thread_t owner,struct turnstile * wl_ts,turnstile_update_flags_t flags)3294*4f1223e8SApple OSS Distributions workq_kern_threadreq_update_inheritor(struct proc *p, workq_threadreq_t req,
3295*4f1223e8SApple OSS Distributions thread_t owner, struct turnstile *wl_ts,
3296*4f1223e8SApple OSS Distributions turnstile_update_flags_t flags)
3297*4f1223e8SApple OSS Distributions {
3298*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3299*4f1223e8SApple OSS Distributions turnstile_inheritor_t inheritor;
3300*4f1223e8SApple OSS Distributions
3301*4f1223e8SApple OSS Distributions assert(req->tr_qos != WORKQ_THREAD_QOS_MANAGER);
3302*4f1223e8SApple OSS Distributions assert(req->tr_flags & WORKQ_TR_FLAG_WORKLOOP);
3303*4f1223e8SApple OSS Distributions workq_lock_held(wq);
3304*4f1223e8SApple OSS Distributions
3305*4f1223e8SApple OSS Distributions if (req->tr_state == WORKQ_TR_STATE_BINDING) {
3306*4f1223e8SApple OSS Distributions kqueue_threadreq_bind(p, req, req->tr_thread,
3307*4f1223e8SApple OSS Distributions KQUEUE_THREADREQ_BIND_NO_INHERITOR_UPDATE);
3308*4f1223e8SApple OSS Distributions return;
3309*4f1223e8SApple OSS Distributions }
3310*4f1223e8SApple OSS Distributions
3311*4f1223e8SApple OSS Distributions if (_wq_exiting(wq)) {
3312*4f1223e8SApple OSS Distributions inheritor = TURNSTILE_INHERITOR_NULL;
3313*4f1223e8SApple OSS Distributions } else {
3314*4f1223e8SApple OSS Distributions if (req->tr_state != WORKQ_TR_STATE_QUEUED) {
3315*4f1223e8SApple OSS Distributions panic("Invalid thread request (%p) state %d", req, req->tr_state);
3316*4f1223e8SApple OSS Distributions }
3317*4f1223e8SApple OSS Distributions
3318*4f1223e8SApple OSS Distributions if (owner) {
3319*4f1223e8SApple OSS Distributions inheritor = owner;
3320*4f1223e8SApple OSS Distributions flags |= TURNSTILE_INHERITOR_THREAD;
3321*4f1223e8SApple OSS Distributions } else {
3322*4f1223e8SApple OSS Distributions inheritor = wq->wq_turnstile;
3323*4f1223e8SApple OSS Distributions flags |= TURNSTILE_INHERITOR_TURNSTILE;
3324*4f1223e8SApple OSS Distributions }
3325*4f1223e8SApple OSS Distributions }
3326*4f1223e8SApple OSS Distributions
3327*4f1223e8SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
3328*4f1223e8SApple OSS Distributions turnstile_update_inheritor(wl_ts, inheritor, flags);
3329*4f1223e8SApple OSS Distributions });
3330*4f1223e8SApple OSS Distributions }
3331*4f1223e8SApple OSS Distributions
3332*4f1223e8SApple OSS Distributions /*
3333*4f1223e8SApple OSS Distributions * An entry point for kevent to request a newly created workqueue thread
3334*4f1223e8SApple OSS Distributions * and bind it permanently to the given workqueue thread request.
3335*4f1223e8SApple OSS Distributions *
3336*4f1223e8SApple OSS Distributions * It currently only supports fixed scheduler priority thread requests.
3337*4f1223e8SApple OSS Distributions *
3338*4f1223e8SApple OSS Distributions * The newly created thread counts towards wq_nthreads. This function returns
3339*4f1223e8SApple OSS Distributions * an error if we are above that limit. There is no concept of delayed thread
3340*4f1223e8SApple OSS Distributions * creation for such specially configured kqworkloops.
3341*4f1223e8SApple OSS Distributions *
3342*4f1223e8SApple OSS Distributions * If successful, the newly created thread will be parked in
3343*4f1223e8SApple OSS Distributions * workq_bound_thread_initialize_and_unpark_continue waiting for
3344*4f1223e8SApple OSS Distributions * new incoming events.
3345*4f1223e8SApple OSS Distributions */
kern_return_t
workq_kern_threadreq_permanent_bind(struct proc *p, struct workq_threadreq_s *kqr)
{
	/*
	 * NOTE(review): ret mixes domains — EDOM is an errno value returned
	 * through a kern_return_t; callers appear to only test != KERN_SUCCESS.
	 * Confirm against the call sites before changing.
	 */
	kern_return_t ret = 0;
	thread_t new_thread = NULL;
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);

	if (wq->wq_nthreads >= wq_max_threads) {
		/* Over the thread limit: no delayed creation for bound kqworkloops. */
		ret = EDOM;
	} else {
		if (kqr->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
			workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(kqr);
			/*
			 * For requests outside-of-QoS, we fully initialize the thread
			 * request here followed by preadopting the scheduling properties
			 * on the newly created bound thread.
			 */
			thread_qos_t qos = thread_workq_qos_for_pri(trp.trp_pri);
			if (qos == THREAD_QOS_UNSPECIFIED) {
				/* Priority did not map to a QoS bucket; fall back to ABOVEUI. */
				qos = WORKQ_THREAD_QOS_ABOVEUI;
			}
			kqr->tr_qos = qos;
		}
		kqr->tr_count = 1;

		/* workq_lock dropped and retaken around thread creation below. */
		ret = workq_add_new_idle_thread(p, wq,
		    workq_bound_thread_initialize_and_unpark_continue,
		    true, &new_thread);
		if (ret == KERN_SUCCESS) {
			struct uthread *uth = get_bsdthread_info(new_thread);
			if (kqr->tr_flags & WORKQ_TR_FLAG_WL_OUTSIDE_QOS) {
				workq_thread_reset_pri(wq, uth, kqr, /*unpark*/ true);
			}
			/*
			 * The newly created thread goes through a full bind to the kqwl
			 * right upon creation.
			 * It then falls back to soft bind/unbind upon wakeup/park.
			 */
			kqueue_threadreq_bind_prepost(p, kqr, uth);
			uth->uu_workq_flags |= UT_WORKQ_PERMANENT_BIND;
		}
	}

	workq_unlock(wq);

	if (ret == KERN_SUCCESS) {
		/* Commit of the bind happens only after the workq lock is dropped. */
		kqueue_threadreq_bind_commit(p, new_thread);
	}
	return ret;
}
3399*4f1223e8SApple OSS Distributions
3400*4f1223e8SApple OSS Distributions /*
3401*4f1223e8SApple OSS Distributions * Called with kqlock held. It does not need to take the process wide
3402*4f1223e8SApple OSS Distributions * global workq lock -> making it faster.
3403*4f1223e8SApple OSS Distributions */
3404*4f1223e8SApple OSS Distributions void
workq_kern_bound_thread_wakeup(struct workq_threadreq_s * kqr)3405*4f1223e8SApple OSS Distributions workq_kern_bound_thread_wakeup(struct workq_threadreq_s *kqr)
3406*4f1223e8SApple OSS Distributions {
3407*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
3408*4f1223e8SApple OSS Distributions workq_threadreq_param_t trp = kqueue_threadreq_workloop_param(kqr);
3409*4f1223e8SApple OSS Distributions
3410*4f1223e8SApple OSS Distributions /*
3411*4f1223e8SApple OSS Distributions * See "Locking model for accessing uu_workq_flags" for more information
3412*4f1223e8SApple OSS Distributions * on how access to uu_workq_flags for the bound thread is synchronized.
3413*4f1223e8SApple OSS Distributions */
3414*4f1223e8SApple OSS Distributions assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING)) == 0);
3415*4f1223e8SApple OSS Distributions
3416*4f1223e8SApple OSS Distributions if (trp.trp_flags & TRP_RELEASED) {
3417*4f1223e8SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_DYING;
3418*4f1223e8SApple OSS Distributions } else {
3419*4f1223e8SApple OSS Distributions uth->uu_workq_flags |= UT_WORKQ_RUNNING;
3420*4f1223e8SApple OSS Distributions }
3421*4f1223e8SApple OSS Distributions
3422*4f1223e8SApple OSS Distributions workq_thread_wakeup(uth);
3423*4f1223e8SApple OSS Distributions }
3424*4f1223e8SApple OSS Distributions
/*
 * Park the permanently bound workqueue thread (the caller itself).
 *
 * Called with kqlock held. Dropped before parking.
 * It does not need to take process wide global workqueue
 * lock -> making it faster.
 */
__attribute__((noreturn, noinline))
void
workq_kern_bound_thread_park(struct workq_threadreq_s *kqr)
{
	struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
	assert(uth == current_uthread());

	/*
	 * See "Locking model for accessing uu_workq_flags" for more information
	 * on how access to uu_workq_flags for the bound thread is synchronized.
	 */
	uth->uu_workq_flags &= ~(UT_WORKQ_RUNNING);

	/* Parking ends this thread's workqueue quantum. */
	thread_disarm_workqueue_quantum(get_machthread(uth));

	/*
	 * TODO (pavhad) We could do the reusable userspace stack performance
	 * optimization here.
	 */

	kqworkloop_bound_thread_park_prepost(kqr);
	/* KQ_SLEEP bit is set and kqlock is dropped. */

	/* Drop any adopted voucher before going to sleep. */
	__assert_only kern_return_t kr;
	kr = thread_set_voucher_name(MACH_PORT_NULL);
	assert(kr == KERN_SUCCESS);

	/* Blocks until workq_kern_bound_thread_wakeup(); does not return here. */
	kqworkloop_bound_thread_park_commit(kqr,
	    workq_parked_wait_event(uth), workq_bound_thread_unpark_continue);

	__builtin_unreachable();
}
3462*4f1223e8SApple OSS Distributions
/*
 * To terminate the permanently bound workqueue thread. It unbinds itself
 * from the kqwl during uthread_cleanup -> kqueue_threadreq_unbind.
 * That is also when it releases its reference on the kqwl.
 */
__attribute__((noreturn, noinline))
void
workq_kern_bound_thread_terminate(struct workq_threadreq_s *kqr)
{
	proc_t p = current_proc();
	struct uthread *uth = get_bsdthread_info(kqr->tr_thread);
	uint16_t uu_workq_flags_orig;

	assert(uth == current_uthread());

	/*
	 * See "Locking model for accessing uu_workq_flags" for more information
	 * on how access to uu_workq_flags for the bound thread is synchronized.
	 * Returns the flags as they were before termination started.
	 */
	kqworkloop_bound_thread_terminate(kqr, &uu_workq_flags_orig);

	if (uu_workq_flags_orig & UT_WORKQ_WORK_INTERVAL_JOINED) {
		__assert_only kern_return_t kr;
		/* Passing MACH_PORT_NULL leaves the currently joined interval. */
		kr = kern_work_interval_join(get_machthread(uth), MACH_PORT_NULL);
		/* The bound thread un-joins the work interval and drops its +1 ref. */
		assert(kr == KERN_SUCCESS);
	}

	/*
	 * Drop the voucher now that we are on our way to termination.
	 */
	__assert_only kern_return_t kr;
	kr = thread_set_voucher_name(MACH_PORT_NULL);
	assert(kr == KERN_SUCCESS);

	/* Report the QoS the thread last ran at back to userspace. */
	uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
	upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
	    WQ_FLAG_THREAD_PRIO_QOS;

	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if ((uu_workq_flags_orig & UT_WORKQ_NEW) == 0) {
		upcall_flags |= WQ_FLAG_THREAD_REUSE;
	}

	/* WQ_SETUP_EXIT_THREAD routes the upcall straight to thread exit. */
	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, WQ_SETUP_EXIT_THREAD, upcall_flags);
	__builtin_unreachable();
}
3513*4f1223e8SApple OSS Distributions
3514*4f1223e8SApple OSS Distributions void
workq_kern_threadreq_redrive(struct proc * p,workq_kern_threadreq_flags_t flags)3515*4f1223e8SApple OSS Distributions workq_kern_threadreq_redrive(struct proc *p, workq_kern_threadreq_flags_t flags)
3516*4f1223e8SApple OSS Distributions {
3517*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr_fast(p);
3518*4f1223e8SApple OSS Distributions
3519*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
3520*4f1223e8SApple OSS Distributions workq_schedule_creator(p, wq, flags);
3521*4f1223e8SApple OSS Distributions workq_unlock(wq);
3522*4f1223e8SApple OSS Distributions }
3523*4f1223e8SApple OSS Distributions
3524*4f1223e8SApple OSS Distributions /*
3525*4f1223e8SApple OSS Distributions * Always called at AST by the thread on itself
3526*4f1223e8SApple OSS Distributions *
3527*4f1223e8SApple OSS Distributions * Upon quantum expiry, the workqueue subsystem evaluates its state and decides
3528*4f1223e8SApple OSS Distributions * on what the thread should do next. The TSD value is always set by the thread
3529*4f1223e8SApple OSS Distributions * on itself in the kernel and cleared either by userspace when it acks the TSD
3530*4f1223e8SApple OSS Distributions * value and takes action, or by the thread in the kernel when the quantum
3531*4f1223e8SApple OSS Distributions * expires again.
3532*4f1223e8SApple OSS Distributions */
3533*4f1223e8SApple OSS Distributions void
workq_kern_quantum_expiry_reevaluate(proc_t proc,thread_t thread)3534*4f1223e8SApple OSS Distributions workq_kern_quantum_expiry_reevaluate(proc_t proc, thread_t thread)
3535*4f1223e8SApple OSS Distributions {
3536*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(thread);
3537*4f1223e8SApple OSS Distributions
3538*4f1223e8SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3539*4f1223e8SApple OSS Distributions return;
3540*4f1223e8SApple OSS Distributions }
3541*4f1223e8SApple OSS Distributions
3542*4f1223e8SApple OSS Distributions if (!thread_supports_cooperative_workqueue(thread)) {
3543*4f1223e8SApple OSS Distributions panic("Quantum expired for thread that doesn't support cooperative workqueue");
3544*4f1223e8SApple OSS Distributions }
3545*4f1223e8SApple OSS Distributions
3546*4f1223e8SApple OSS Distributions thread_qos_t qos = uth->uu_workq_pri.qos_bucket;
3547*4f1223e8SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3548*4f1223e8SApple OSS Distributions panic("Thread should not have workq bucket of QoS UN");
3549*4f1223e8SApple OSS Distributions }
3550*4f1223e8SApple OSS Distributions
3551*4f1223e8SApple OSS Distributions assert(thread_has_expired_workqueue_quantum(thread, false));
3552*4f1223e8SApple OSS Distributions
3553*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(proc);
3554*4f1223e8SApple OSS Distributions assert(wq != NULL);
3555*4f1223e8SApple OSS Distributions
3556*4f1223e8SApple OSS Distributions /*
3557*4f1223e8SApple OSS Distributions * For starters, we're just going to evaluate and see if we need to narrow
3558*4f1223e8SApple OSS Distributions * the pool and tell this thread to park if needed. In the future, we'll
3559*4f1223e8SApple OSS Distributions * evaluate and convey other workqueue state information like needing to
3560*4f1223e8SApple OSS Distributions * pump kevents, etc.
3561*4f1223e8SApple OSS Distributions */
3562*4f1223e8SApple OSS Distributions uint64_t flags = 0;
3563*4f1223e8SApple OSS Distributions
3564*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
3565*4f1223e8SApple OSS Distributions
3566*4f1223e8SApple OSS Distributions if (workq_thread_is_cooperative(uth)) {
3567*4f1223e8SApple OSS Distributions if (!workq_cooperative_allowance(wq, qos, uth, false)) {
3568*4f1223e8SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3569*4f1223e8SApple OSS Distributions } else {
3570*4f1223e8SApple OSS Distributions /* In the future, when we have kevent hookups for the cooperative
3571*4f1223e8SApple OSS Distributions * pool, we need fancier logic for what userspace should do. But
3572*4f1223e8SApple OSS Distributions * right now, only userspace thread requests exist - so we'll just
3573*4f1223e8SApple OSS Distributions * tell userspace to shuffle work items */
3574*4f1223e8SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_SHUFFLE;
3575*4f1223e8SApple OSS Distributions }
3576*4f1223e8SApple OSS Distributions } else if (workq_thread_is_nonovercommit(uth)) {
3577*4f1223e8SApple OSS Distributions if (!workq_constrained_allowance(wq, qos, uth, false, false)) {
3578*4f1223e8SApple OSS Distributions flags |= PTHREAD_WQ_QUANTUM_EXPIRY_NARROW;
3579*4f1223e8SApple OSS Distributions }
3580*4f1223e8SApple OSS Distributions }
3581*4f1223e8SApple OSS Distributions workq_unlock(wq);
3582*4f1223e8SApple OSS Distributions
3583*4f1223e8SApple OSS Distributions WQ_TRACE(TRACE_wq_quantum_expiry_reevaluate, flags, 0, 0, 0);
3584*4f1223e8SApple OSS Distributions
3585*4f1223e8SApple OSS Distributions kevent_set_workq_quantum_expiry_user_tsd(proc, thread, flags);
3586*4f1223e8SApple OSS Distributions
3587*4f1223e8SApple OSS Distributions /* We have conveyed to userspace about what it needs to do upon quantum
3588*4f1223e8SApple OSS Distributions * expiry, now rearm the workqueue quantum again */
3589*4f1223e8SApple OSS Distributions thread_arm_workqueue_quantum(get_machthread(uth));
3590*4f1223e8SApple OSS Distributions }
3591*4f1223e8SApple OSS Distributions
3592*4f1223e8SApple OSS Distributions void
workq_schedule_creator_turnstile_redrive(struct workqueue * wq,bool locked)3593*4f1223e8SApple OSS Distributions workq_schedule_creator_turnstile_redrive(struct workqueue *wq, bool locked)
3594*4f1223e8SApple OSS Distributions {
3595*4f1223e8SApple OSS Distributions if (locked) {
3596*4f1223e8SApple OSS Distributions workq_schedule_creator(NULL, wq, WORKQ_THREADREQ_NONE);
3597*4f1223e8SApple OSS Distributions } else {
3598*4f1223e8SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
3599*4f1223e8SApple OSS Distributions }
3600*4f1223e8SApple OSS Distributions }
3601*4f1223e8SApple OSS Distributions
/*
 * Handle a workqueue thread returning to the kernel
 * (WQOPS_THREAD_*_RETURN): deliver pending stack events, then either
 * return to userspace on error or pick new work / park.
 */
static int
workq_thread_return(struct proc *p, struct workq_kernreturn_args *uap,
    struct workqueue *wq)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	workq_threadreq_t kqr = uth->uu_kqr_bound;
	workq_threadreq_param_t trp = { };
	int nevents = uap->affinity, error;
	user_addr_t eventlist = uap->item;

	/* Only live workqueue threads may use this path. */
	if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
	    (uth->uu_workq_flags & UT_WORKQ_DYING)) {
		return EINVAL;
	}

	/* Handing back an event list requires a bound thread request. */
	if (eventlist && nevents && kqr == NULL) {
		return EINVAL;
	}

	/*
	 * Reset signal mask on the workqueue thread to default state,
	 * but do not touch any signals that are marked for preservation.
	 */
	sigset_t resettable = uth->uu_sigmask & ~p->p_workq_allow_sigmask;
	if (resettable != (sigset_t)~workq_threadmask) {
		proc_lock(p);
		uth->uu_sigmask |= ~workq_threadmask & ~p->p_workq_allow_sigmask;
		proc_unlock(p);
	}

	if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_WL_PARAMS) {
		/*
		 * Ensure we store the threadreq param before unbinding
		 * the kqr from this thread.
		 */
		trp = kqueue_threadreq_workloop_param(kqr);
	}

	/* Permanently bound threads skip the base-pri freeze below. */
	if (kqr && kqr->tr_flags & WORKQ_TR_FLAG_PERMANENT_BIND) {
		goto handle_stack_events;
	}

	/*
	 * Freeze the base pri while we decide the fate of this thread.
	 *
	 * Either:
	 * - we return to user and kevent_cleanup will have unfrozen the base pri,
	 * - or we proceed to workq_select_threadreq_or_park_and_unlock() who will.
	 */
	thread_freeze_base_pri(th);

handle_stack_events:

	if (kqr) {
		/* Describe this thread to userspace for the event delivery upcall. */
		uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI | WQ_FLAG_THREAD_REUSE;
		if (kqr->tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
			upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
		} else {
			upcall_flags |= WQ_FLAG_THREAD_KEVENT;
		}
		if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
			upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
		} else {
			if (workq_thread_is_overcommit(uth)) {
				upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
			}
			if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
				upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
			} else {
				upcall_flags |= uth->uu_workq_pri.qos_req |
				    WQ_FLAG_THREAD_PRIO_QOS;
			}
		}
		error = pthread_functions->workq_handle_stack_events(p, th,
		    get_task_map(proc_task(p)), uth->uu_workq_stackaddr,
		    uth->uu_workq_thport, eventlist, nevents, upcall_flags);
		if (error) {
			/* Still bound on error: the thread returns to userspace. */
			assert(uth->uu_kqr_bound == kqr);
			return error;
		}

		// pthread is supposed to pass KEVENT_FLAG_PARKING here
		// which should cause the above call to either:
		// - not return
		// - return an error
		// - return 0 and have unbound properly
		assert(uth->uu_kqr_bound == NULL);
	}

	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_END, wq, uap->options, 0, 0);

	thread_sched_call(th, NULL);
	thread_will_park_or_terminate(th);
#if CONFIG_WORKLOOP_DEBUG
	UU_KEVENT_HISTORY_WRITE_ENTRY(uth, { .uu_error = -1, });
#endif

	workq_lock_spin(wq);
	WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
	uth->uu_save.uus_workq_park_data.workloop_params = trp.trp_value;
	/* Does not return: picks new work or parks, dropping the lock. */
	workq_select_threadreq_or_park_and_unlock(p, wq, uth,
	    WQ_SETUP_CLEAR_VOUCHER);
	__builtin_unreachable();
}
3707*4f1223e8SApple OSS Distributions
3708*4f1223e8SApple OSS Distributions /**
3709*4f1223e8SApple OSS Distributions * Multiplexed call to interact with the workqueue mechanism
3710*4f1223e8SApple OSS Distributions */
3711*4f1223e8SApple OSS Distributions int
workq_kernreturn(struct proc * p,struct workq_kernreturn_args * uap,int32_t * retval)3712*4f1223e8SApple OSS Distributions workq_kernreturn(struct proc *p, struct workq_kernreturn_args *uap, int32_t *retval)
3713*4f1223e8SApple OSS Distributions {
3714*4f1223e8SApple OSS Distributions int options = uap->options;
3715*4f1223e8SApple OSS Distributions int arg2 = uap->affinity;
3716*4f1223e8SApple OSS Distributions int arg3 = uap->prio;
3717*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
3718*4f1223e8SApple OSS Distributions int error = 0;
3719*4f1223e8SApple OSS Distributions
3720*4f1223e8SApple OSS Distributions if ((p->p_lflag & P_LREGISTER) == 0) {
3721*4f1223e8SApple OSS Distributions return EINVAL;
3722*4f1223e8SApple OSS Distributions }
3723*4f1223e8SApple OSS Distributions
3724*4f1223e8SApple OSS Distributions switch (options) {
3725*4f1223e8SApple OSS Distributions case WQOPS_QUEUE_NEWSPISUPP: {
3726*4f1223e8SApple OSS Distributions /*
3727*4f1223e8SApple OSS Distributions * arg2 = offset of serialno into dispatch queue
3728*4f1223e8SApple OSS Distributions * arg3 = kevent support
3729*4f1223e8SApple OSS Distributions */
3730*4f1223e8SApple OSS Distributions int offset = arg2;
3731*4f1223e8SApple OSS Distributions if (arg3 & 0x01) {
3732*4f1223e8SApple OSS Distributions // If we get here, then userspace has indicated support for kevent delivery.
3733*4f1223e8SApple OSS Distributions }
3734*4f1223e8SApple OSS Distributions
3735*4f1223e8SApple OSS Distributions p->p_dispatchqueue_serialno_offset = (uint64_t)offset;
3736*4f1223e8SApple OSS Distributions break;
3737*4f1223e8SApple OSS Distributions }
3738*4f1223e8SApple OSS Distributions case WQOPS_QUEUE_REQTHREADS: {
3739*4f1223e8SApple OSS Distributions /*
3740*4f1223e8SApple OSS Distributions * arg2 = number of threads to start
3741*4f1223e8SApple OSS Distributions * arg3 = priority
3742*4f1223e8SApple OSS Distributions */
3743*4f1223e8SApple OSS Distributions error = workq_reqthreads(p, arg2, arg3, false);
3744*4f1223e8SApple OSS Distributions break;
3745*4f1223e8SApple OSS Distributions }
3746*4f1223e8SApple OSS Distributions /* For requesting threads for the cooperative pool */
3747*4f1223e8SApple OSS Distributions case WQOPS_QUEUE_REQTHREADS2: {
3748*4f1223e8SApple OSS Distributions /*
3749*4f1223e8SApple OSS Distributions * arg2 = number of threads to start
3750*4f1223e8SApple OSS Distributions * arg3 = priority
3751*4f1223e8SApple OSS Distributions */
3752*4f1223e8SApple OSS Distributions error = workq_reqthreads(p, arg2, arg3, true);
3753*4f1223e8SApple OSS Distributions break;
3754*4f1223e8SApple OSS Distributions }
3755*4f1223e8SApple OSS Distributions case WQOPS_SET_EVENT_MANAGER_PRIORITY: {
3756*4f1223e8SApple OSS Distributions /*
3757*4f1223e8SApple OSS Distributions * arg2 = priority for the manager thread
3758*4f1223e8SApple OSS Distributions *
3759*4f1223e8SApple OSS Distributions * if _PTHREAD_PRIORITY_SCHED_PRI_FLAG is set,
3760*4f1223e8SApple OSS Distributions * the low bits of the value contains a scheduling priority
3761*4f1223e8SApple OSS Distributions * instead of a QOS value
3762*4f1223e8SApple OSS Distributions */
3763*4f1223e8SApple OSS Distributions pthread_priority_t pri = arg2;
3764*4f1223e8SApple OSS Distributions
3765*4f1223e8SApple OSS Distributions if (wq == NULL) {
3766*4f1223e8SApple OSS Distributions error = EINVAL;
3767*4f1223e8SApple OSS Distributions break;
3768*4f1223e8SApple OSS Distributions }
3769*4f1223e8SApple OSS Distributions
3770*4f1223e8SApple OSS Distributions /*
3771*4f1223e8SApple OSS Distributions * Normalize the incoming priority so that it is ordered numerically.
3772*4f1223e8SApple OSS Distributions */
3773*4f1223e8SApple OSS Distributions if (_pthread_priority_has_sched_pri(pri)) {
3774*4f1223e8SApple OSS Distributions pri &= (_PTHREAD_PRIORITY_SCHED_PRI_MASK |
3775*4f1223e8SApple OSS Distributions _PTHREAD_PRIORITY_SCHED_PRI_FLAG);
3776*4f1223e8SApple OSS Distributions } else {
3777*4f1223e8SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(pri);
3778*4f1223e8SApple OSS Distributions int relpri = _pthread_priority_relpri(pri);
3779*4f1223e8SApple OSS Distributions if (relpri > 0 || relpri < THREAD_QOS_MIN_TIER_IMPORTANCE ||
3780*4f1223e8SApple OSS Distributions qos == THREAD_QOS_UNSPECIFIED) {
3781*4f1223e8SApple OSS Distributions error = EINVAL;
3782*4f1223e8SApple OSS Distributions break;
3783*4f1223e8SApple OSS Distributions }
3784*4f1223e8SApple OSS Distributions pri &= ~_PTHREAD_PRIORITY_FLAGS_MASK;
3785*4f1223e8SApple OSS Distributions }
3786*4f1223e8SApple OSS Distributions
		/*
		 * If userspace passes a scheduling priority, that wins over any QoS.
		 * Userspace should take care not to lower the priority this way.
		 */
3791*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
3792*4f1223e8SApple OSS Distributions if (wq->wq_event_manager_priority < (uint32_t)pri) {
3793*4f1223e8SApple OSS Distributions wq->wq_event_manager_priority = (uint32_t)pri;
3794*4f1223e8SApple OSS Distributions }
3795*4f1223e8SApple OSS Distributions workq_unlock(wq);
3796*4f1223e8SApple OSS Distributions break;
3797*4f1223e8SApple OSS Distributions }
3798*4f1223e8SApple OSS Distributions case WQOPS_THREAD_KEVENT_RETURN:
3799*4f1223e8SApple OSS Distributions case WQOPS_THREAD_WORKLOOP_RETURN:
3800*4f1223e8SApple OSS Distributions case WQOPS_THREAD_RETURN: {
3801*4f1223e8SApple OSS Distributions error = workq_thread_return(p, uap, wq);
3802*4f1223e8SApple OSS Distributions break;
3803*4f1223e8SApple OSS Distributions }
3804*4f1223e8SApple OSS Distributions
3805*4f1223e8SApple OSS Distributions case WQOPS_SHOULD_NARROW: {
3806*4f1223e8SApple OSS Distributions /*
3807*4f1223e8SApple OSS Distributions * arg2 = priority to test
3808*4f1223e8SApple OSS Distributions * arg3 = unused
3809*4f1223e8SApple OSS Distributions */
3810*4f1223e8SApple OSS Distributions thread_t th = current_thread();
3811*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(th);
3812*4f1223e8SApple OSS Distributions if (((thread_get_tag(th) & THREAD_TAG_WORKQUEUE) == 0) ||
3813*4f1223e8SApple OSS Distributions (uth->uu_workq_flags & (UT_WORKQ_DYING | UT_WORKQ_OVERCOMMIT))) {
3814*4f1223e8SApple OSS Distributions error = EINVAL;
3815*4f1223e8SApple OSS Distributions break;
3816*4f1223e8SApple OSS Distributions }
3817*4f1223e8SApple OSS Distributions
3818*4f1223e8SApple OSS Distributions thread_qos_t qos = _pthread_priority_thread_qos(arg2);
3819*4f1223e8SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
3820*4f1223e8SApple OSS Distributions error = EINVAL;
3821*4f1223e8SApple OSS Distributions break;
3822*4f1223e8SApple OSS Distributions }
3823*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
3824*4f1223e8SApple OSS Distributions bool should_narrow = !workq_constrained_allowance(wq, qos, uth, false, false);
3825*4f1223e8SApple OSS Distributions workq_unlock(wq);
3826*4f1223e8SApple OSS Distributions
3827*4f1223e8SApple OSS Distributions *retval = should_narrow;
3828*4f1223e8SApple OSS Distributions break;
3829*4f1223e8SApple OSS Distributions }
3830*4f1223e8SApple OSS Distributions case WQOPS_SETUP_DISPATCH: {
3831*4f1223e8SApple OSS Distributions /*
3832*4f1223e8SApple OSS Distributions * item = pointer to workq_dispatch_config structure
3833*4f1223e8SApple OSS Distributions * arg2 = sizeof(item)
3834*4f1223e8SApple OSS Distributions */
3835*4f1223e8SApple OSS Distributions struct workq_dispatch_config cfg;
3836*4f1223e8SApple OSS Distributions bzero(&cfg, sizeof(cfg));
3837*4f1223e8SApple OSS Distributions
3838*4f1223e8SApple OSS Distributions error = copyin(uap->item, &cfg, MIN(sizeof(cfg), (unsigned long) arg2));
3839*4f1223e8SApple OSS Distributions if (error) {
3840*4f1223e8SApple OSS Distributions break;
3841*4f1223e8SApple OSS Distributions }
3842*4f1223e8SApple OSS Distributions
3843*4f1223e8SApple OSS Distributions if (cfg.wdc_flags & ~WORKQ_DISPATCH_SUPPORTED_FLAGS ||
3844*4f1223e8SApple OSS Distributions cfg.wdc_version < WORKQ_DISPATCH_MIN_SUPPORTED_VERSION) {
3845*4f1223e8SApple OSS Distributions error = ENOTSUP;
3846*4f1223e8SApple OSS Distributions break;
3847*4f1223e8SApple OSS Distributions }
3848*4f1223e8SApple OSS Distributions
3849*4f1223e8SApple OSS Distributions /* Load fields from version 1 */
3850*4f1223e8SApple OSS Distributions p->p_dispatchqueue_serialno_offset = cfg.wdc_queue_serialno_offs;
3851*4f1223e8SApple OSS Distributions
3852*4f1223e8SApple OSS Distributions /* Load fields from version 2 */
3853*4f1223e8SApple OSS Distributions if (cfg.wdc_version >= 2) {
3854*4f1223e8SApple OSS Distributions p->p_dispatchqueue_label_offset = cfg.wdc_queue_label_offs;
3855*4f1223e8SApple OSS Distributions }
3856*4f1223e8SApple OSS Distributions
3857*4f1223e8SApple OSS Distributions break;
3858*4f1223e8SApple OSS Distributions }
3859*4f1223e8SApple OSS Distributions default:
3860*4f1223e8SApple OSS Distributions error = EINVAL;
3861*4f1223e8SApple OSS Distributions break;
3862*4f1223e8SApple OSS Distributions }
3863*4f1223e8SApple OSS Distributions
3864*4f1223e8SApple OSS Distributions return error;
3865*4f1223e8SApple OSS Distributions }
3866*4f1223e8SApple OSS Distributions
3867*4f1223e8SApple OSS Distributions /*
3868*4f1223e8SApple OSS Distributions * We have no work to do, park ourselves on the idle list.
3869*4f1223e8SApple OSS Distributions *
3870*4f1223e8SApple OSS Distributions * Consumes the workqueue lock and does not return.
3871*4f1223e8SApple OSS Distributions */
3872*4f1223e8SApple OSS Distributions __attribute__((noreturn, noinline))
3873*4f1223e8SApple OSS Distributions static void
workq_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)3874*4f1223e8SApple OSS Distributions workq_park_and_unlock(proc_t p, struct workqueue *wq, struct uthread *uth,
3875*4f1223e8SApple OSS Distributions uint32_t setup_flags)
3876*4f1223e8SApple OSS Distributions {
3877*4f1223e8SApple OSS Distributions assert(uth == current_uthread());
3878*4f1223e8SApple OSS Distributions assert(uth->uu_kqr_bound == NULL);
3879*4f1223e8SApple OSS Distributions workq_push_idle_thread(p, wq, uth, setup_flags); // may not return
3880*4f1223e8SApple OSS Distributions
3881*4f1223e8SApple OSS Distributions workq_thread_reset_cpupercent(NULL, uth);
3882*4f1223e8SApple OSS Distributions
3883*4f1223e8SApple OSS Distributions #if CONFIG_PREADOPT_TG
3884*4f1223e8SApple OSS Distributions /* Clear the preadoption thread group on the thread.
3885*4f1223e8SApple OSS Distributions *
3886*4f1223e8SApple OSS Distributions * Case 1:
3887*4f1223e8SApple OSS Distributions * Creator thread which never picked up a thread request. We set a
3888*4f1223e8SApple OSS Distributions * preadoption thread group on creator threads but if it never picked
3889*4f1223e8SApple OSS Distributions * up a thread request and didn't go to userspace, then the thread will
3890*4f1223e8SApple OSS Distributions * park with a preadoption thread group but no explicitly adopted
3891*4f1223e8SApple OSS Distributions * voucher or work interval.
3892*4f1223e8SApple OSS Distributions *
3893*4f1223e8SApple OSS Distributions * We drop the preadoption thread group here before proceeding to park.
3894*4f1223e8SApple OSS Distributions * Note - we may get preempted when we drop the workq lock below.
3895*4f1223e8SApple OSS Distributions *
3896*4f1223e8SApple OSS Distributions * Case 2:
3897*4f1223e8SApple OSS Distributions * Thread picked up a thread request and bound to it and returned back
3898*4f1223e8SApple OSS Distributions * from userspace and is parking. At this point, preadoption thread
3899*4f1223e8SApple OSS Distributions * group should be NULL since the thread has unbound from the thread
3900*4f1223e8SApple OSS Distributions * request. So this operation should be a no-op.
3901*4f1223e8SApple OSS Distributions */
3902*4f1223e8SApple OSS Distributions thread_set_preadopt_thread_group(get_machthread(uth), NULL);
3903*4f1223e8SApple OSS Distributions #endif
3904*4f1223e8SApple OSS Distributions
3905*4f1223e8SApple OSS Distributions if ((uth->uu_workq_flags & UT_WORKQ_IDLE_CLEANUP) &&
3906*4f1223e8SApple OSS Distributions !(uth->uu_workq_flags & UT_WORKQ_DYING)) {
3907*4f1223e8SApple OSS Distributions workq_unlock(wq);
3908*4f1223e8SApple OSS Distributions
3909*4f1223e8SApple OSS Distributions /*
3910*4f1223e8SApple OSS Distributions * workq_push_idle_thread() will unset `has_stack`
3911*4f1223e8SApple OSS Distributions * if it wants us to free the stack before parking.
3912*4f1223e8SApple OSS Distributions */
3913*4f1223e8SApple OSS Distributions if (!uth->uu_save.uus_workq_park_data.has_stack) {
3914*4f1223e8SApple OSS Distributions pthread_functions->workq_markfree_threadstack(p,
3915*4f1223e8SApple OSS Distributions get_machthread(uth), get_task_map(proc_task(p)),
3916*4f1223e8SApple OSS Distributions uth->uu_workq_stackaddr);
3917*4f1223e8SApple OSS Distributions }
3918*4f1223e8SApple OSS Distributions
3919*4f1223e8SApple OSS Distributions /*
3920*4f1223e8SApple OSS Distributions * When we remove the voucher from the thread, we may lose our importance
3921*4f1223e8SApple OSS Distributions * causing us to get preempted, so we do this after putting the thread on
3922*4f1223e8SApple OSS Distributions * the idle list. Then, when we get our importance back we'll be able to
3923*4f1223e8SApple OSS Distributions * use this thread from e.g. the kevent call out to deliver a boosting
3924*4f1223e8SApple OSS Distributions * message.
3925*4f1223e8SApple OSS Distributions *
3926*4f1223e8SApple OSS Distributions * Note that setting the voucher to NULL will not clear the preadoption
3927*4f1223e8SApple OSS Distributions * thread since this thread could have become the creator again and
3928*4f1223e8SApple OSS Distributions * perhaps acquired a preadoption thread group.
3929*4f1223e8SApple OSS Distributions */
3930*4f1223e8SApple OSS Distributions __assert_only kern_return_t kr;
3931*4f1223e8SApple OSS Distributions kr = thread_set_voucher_name(MACH_PORT_NULL);
3932*4f1223e8SApple OSS Distributions assert(kr == KERN_SUCCESS);
3933*4f1223e8SApple OSS Distributions
3934*4f1223e8SApple OSS Distributions workq_lock_spin(wq);
3935*4f1223e8SApple OSS Distributions uth->uu_workq_flags &= ~UT_WORKQ_IDLE_CLEANUP;
3936*4f1223e8SApple OSS Distributions setup_flags &= ~WQ_SETUP_CLEAR_VOUCHER;
3937*4f1223e8SApple OSS Distributions }
3938*4f1223e8SApple OSS Distributions
3939*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_END, wq, 0, 0, 0);
3940*4f1223e8SApple OSS Distributions
3941*4f1223e8SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
3942*4f1223e8SApple OSS Distributions /*
3943*4f1223e8SApple OSS Distributions * While we'd dropped the lock to unset our voucher, someone came
3944*4f1223e8SApple OSS Distributions * around and made us runnable. But because we weren't waiting on the
3945*4f1223e8SApple OSS Distributions * event their thread_wakeup() was ineffectual. To correct for that,
3946*4f1223e8SApple OSS Distributions * we just run the continuation ourselves.
3947*4f1223e8SApple OSS Distributions */
3948*4f1223e8SApple OSS Distributions workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
3949*4f1223e8SApple OSS Distributions __builtin_unreachable();
3950*4f1223e8SApple OSS Distributions }
3951*4f1223e8SApple OSS Distributions
3952*4f1223e8SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_DYING) {
3953*4f1223e8SApple OSS Distributions workq_unpark_for_death_and_unlock(p, wq, uth,
3954*4f1223e8SApple OSS Distributions WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, setup_flags);
3955*4f1223e8SApple OSS Distributions __builtin_unreachable();
3956*4f1223e8SApple OSS Distributions }
3957*4f1223e8SApple OSS Distributions
3958*4f1223e8SApple OSS Distributions /* Disarm the workqueue quantum since the thread is now idle */
3959*4f1223e8SApple OSS Distributions thread_disarm_workqueue_quantum(get_machthread(uth));
3960*4f1223e8SApple OSS Distributions
3961*4f1223e8SApple OSS Distributions thread_set_pending_block_hint(get_machthread(uth), kThreadWaitParkedWorkQueue);
3962*4f1223e8SApple OSS Distributions assert_wait(workq_parked_wait_event(uth), THREAD_INTERRUPTIBLE);
3963*4f1223e8SApple OSS Distributions workq_unlock(wq);
3964*4f1223e8SApple OSS Distributions thread_block(workq_unpark_continue);
3965*4f1223e8SApple OSS Distributions __builtin_unreachable();
3966*4f1223e8SApple OSS Distributions }
3967*4f1223e8SApple OSS Distributions
3968*4f1223e8SApple OSS Distributions static inline bool
workq_may_start_event_mgr_thread(struct workqueue * wq,struct uthread * uth)3969*4f1223e8SApple OSS Distributions workq_may_start_event_mgr_thread(struct workqueue *wq, struct uthread *uth)
3970*4f1223e8SApple OSS Distributions {
3971*4f1223e8SApple OSS Distributions /*
3972*4f1223e8SApple OSS Distributions * There's an event manager request and either:
3973*4f1223e8SApple OSS Distributions * - no event manager currently running
3974*4f1223e8SApple OSS Distributions * - we are re-using the event manager
3975*4f1223e8SApple OSS Distributions */
3976*4f1223e8SApple OSS Distributions return wq->wq_thscheduled_count[_wq_bucket(WORKQ_THREAD_QOS_MANAGER)] == 0 ||
3977*4f1223e8SApple OSS Distributions (uth && uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER);
3978*4f1223e8SApple OSS Distributions }
3979*4f1223e8SApple OSS Distributions
3980*4f1223e8SApple OSS Distributions /* Called with workq lock held. */
3981*4f1223e8SApple OSS Distributions static uint32_t
workq_constrained_allowance(struct workqueue * wq,thread_qos_t at_qos,struct uthread * uth,bool may_start_timer,bool record_failed_allowance)3982*4f1223e8SApple OSS Distributions workq_constrained_allowance(struct workqueue *wq, thread_qos_t at_qos,
3983*4f1223e8SApple OSS Distributions struct uthread *uth, bool may_start_timer, bool record_failed_allowance)
3984*4f1223e8SApple OSS Distributions {
3985*4f1223e8SApple OSS Distributions assert(at_qos != WORKQ_THREAD_QOS_MANAGER);
3986*4f1223e8SApple OSS Distributions uint32_t allowance_passed = 0;
3987*4f1223e8SApple OSS Distributions uint32_t count = 0;
3988*4f1223e8SApple OSS Distributions
3989*4f1223e8SApple OSS Distributions uint32_t max_count = wq->wq_constrained_threads_scheduled;
3990*4f1223e8SApple OSS Distributions if (uth && workq_thread_is_nonovercommit(uth)) {
3991*4f1223e8SApple OSS Distributions /*
3992*4f1223e8SApple OSS Distributions * don't count the current thread as scheduled
3993*4f1223e8SApple OSS Distributions */
3994*4f1223e8SApple OSS Distributions assert(max_count > 0);
3995*4f1223e8SApple OSS Distributions max_count--;
3996*4f1223e8SApple OSS Distributions }
3997*4f1223e8SApple OSS Distributions if (max_count >= wq_max_constrained_threads) {
3998*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 1,
3999*4f1223e8SApple OSS Distributions wq->wq_constrained_threads_scheduled,
4000*4f1223e8SApple OSS Distributions wq_max_constrained_threads);
4001*4f1223e8SApple OSS Distributions /*
4002*4f1223e8SApple OSS Distributions * we need 1 or more constrained threads to return to the kernel before
4003*4f1223e8SApple OSS Distributions * we can dispatch additional work
4004*4f1223e8SApple OSS Distributions */
4005*4f1223e8SApple OSS Distributions allowance_passed = 0;
4006*4f1223e8SApple OSS Distributions goto out;
4007*4f1223e8SApple OSS Distributions }
4008*4f1223e8SApple OSS Distributions max_count -= wq_max_constrained_threads;
4009*4f1223e8SApple OSS Distributions
4010*4f1223e8SApple OSS Distributions /*
4011*4f1223e8SApple OSS Distributions * Compute a metric for many how many threads are active. We find the
4012*4f1223e8SApple OSS Distributions * highest priority request outstanding and then add up the number of active
4013*4f1223e8SApple OSS Distributions * threads in that and all higher-priority buckets. We'll also add any
4014*4f1223e8SApple OSS Distributions * "busy" threads which are not currently active but blocked recently enough
4015*4f1223e8SApple OSS Distributions * that we can't be sure that they won't be unblocked soon and start
4016*4f1223e8SApple OSS Distributions * being active again.
4017*4f1223e8SApple OSS Distributions *
4018*4f1223e8SApple OSS Distributions * We'll then compare this metric to our max concurrency to decide whether
4019*4f1223e8SApple OSS Distributions * to add a new thread.
4020*4f1223e8SApple OSS Distributions */
4021*4f1223e8SApple OSS Distributions
4022*4f1223e8SApple OSS Distributions uint32_t busycount, thactive_count;
4023*4f1223e8SApple OSS Distributions
4024*4f1223e8SApple OSS Distributions thactive_count = _wq_thactive_aggregate_downto_qos(wq, _wq_thactive(wq),
4025*4f1223e8SApple OSS Distributions at_qos, &busycount, NULL);
4026*4f1223e8SApple OSS Distributions
4027*4f1223e8SApple OSS Distributions if (uth && uth->uu_workq_pri.qos_bucket != WORKQ_THREAD_QOS_MANAGER &&
4028*4f1223e8SApple OSS Distributions at_qos <= uth->uu_workq_pri.qos_bucket) {
4029*4f1223e8SApple OSS Distributions /*
4030*4f1223e8SApple OSS Distributions * Don't count this thread as currently active, but only if it's not
4031*4f1223e8SApple OSS Distributions * a manager thread, as _wq_thactive_aggregate_downto_qos ignores active
4032*4f1223e8SApple OSS Distributions * managers.
4033*4f1223e8SApple OSS Distributions */
4034*4f1223e8SApple OSS Distributions assert(thactive_count > 0);
4035*4f1223e8SApple OSS Distributions thactive_count--;
4036*4f1223e8SApple OSS Distributions }
4037*4f1223e8SApple OSS Distributions
4038*4f1223e8SApple OSS Distributions count = wq_max_parallelism[_wq_bucket(at_qos)];
4039*4f1223e8SApple OSS Distributions if (count > thactive_count + busycount) {
4040*4f1223e8SApple OSS Distributions count -= thactive_count + busycount;
4041*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 2,
4042*4f1223e8SApple OSS Distributions thactive_count, busycount);
4043*4f1223e8SApple OSS Distributions allowance_passed = MIN(count, max_count);
4044*4f1223e8SApple OSS Distributions goto out;
4045*4f1223e8SApple OSS Distributions } else {
4046*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_constrained_admission | DBG_FUNC_NONE, wq, 3,
4047*4f1223e8SApple OSS Distributions thactive_count, busycount);
4048*4f1223e8SApple OSS Distributions allowance_passed = 0;
4049*4f1223e8SApple OSS Distributions }
4050*4f1223e8SApple OSS Distributions
4051*4f1223e8SApple OSS Distributions if (may_start_timer) {
4052*4f1223e8SApple OSS Distributions /*
4053*4f1223e8SApple OSS Distributions * If this is called from the add timer, we won't have another timer
4054*4f1223e8SApple OSS Distributions * fire when the thread exits the "busy" state, so rearm the timer.
4055*4f1223e8SApple OSS Distributions */
4056*4f1223e8SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
4057*4f1223e8SApple OSS Distributions }
4058*4f1223e8SApple OSS Distributions
4059*4f1223e8SApple OSS Distributions out:
4060*4f1223e8SApple OSS Distributions if (record_failed_allowance) {
4061*4f1223e8SApple OSS Distributions wq->wq_exceeded_active_constrained_thread_limit = !allowance_passed;
4062*4f1223e8SApple OSS Distributions }
4063*4f1223e8SApple OSS Distributions return allowance_passed;
4064*4f1223e8SApple OSS Distributions }
4065*4f1223e8SApple OSS Distributions
4066*4f1223e8SApple OSS Distributions static bool
workq_threadreq_admissible(struct workqueue * wq,struct uthread * uth,workq_threadreq_t req)4067*4f1223e8SApple OSS Distributions workq_threadreq_admissible(struct workqueue *wq, struct uthread *uth,
4068*4f1223e8SApple OSS Distributions workq_threadreq_t req)
4069*4f1223e8SApple OSS Distributions {
4070*4f1223e8SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
4071*4f1223e8SApple OSS Distributions return workq_may_start_event_mgr_thread(wq, uth);
4072*4f1223e8SApple OSS Distributions }
4073*4f1223e8SApple OSS Distributions if (workq_threadreq_is_cooperative(req)) {
4074*4f1223e8SApple OSS Distributions return workq_cooperative_allowance(wq, req->tr_qos, uth, true);
4075*4f1223e8SApple OSS Distributions }
4076*4f1223e8SApple OSS Distributions if (workq_threadreq_is_nonovercommit(req)) {
4077*4f1223e8SApple OSS Distributions return workq_constrained_allowance(wq, req->tr_qos, uth, true, true);
4078*4f1223e8SApple OSS Distributions }
4079*4f1223e8SApple OSS Distributions
4080*4f1223e8SApple OSS Distributions return true;
4081*4f1223e8SApple OSS Distributions }
4082*4f1223e8SApple OSS Distributions
4083*4f1223e8SApple OSS Distributions /*
4084*4f1223e8SApple OSS Distributions * Called from the context of selecting thread requests for threads returning
4085*4f1223e8SApple OSS Distributions * from userspace or creator thread
4086*4f1223e8SApple OSS Distributions */
4087*4f1223e8SApple OSS Distributions static workq_threadreq_t
workq_cooperative_queue_best_req(struct workqueue * wq,struct uthread * uth)4088*4f1223e8SApple OSS Distributions workq_cooperative_queue_best_req(struct workqueue *wq, struct uthread *uth)
4089*4f1223e8SApple OSS Distributions {
4090*4f1223e8SApple OSS Distributions workq_lock_held(wq);
4091*4f1223e8SApple OSS Distributions
4092*4f1223e8SApple OSS Distributions /*
4093*4f1223e8SApple OSS Distributions * If the current thread is cooperative, we need to exclude it as part of
4094*4f1223e8SApple OSS Distributions * cooperative schedule count since this thread is looking for a new
4095*4f1223e8SApple OSS Distributions * request. Change in the schedule count for cooperative pool therefore
4096*4f1223e8SApple OSS Distributions * requires us to reeevaluate the next best request for it.
4097*4f1223e8SApple OSS Distributions */
4098*4f1223e8SApple OSS Distributions if (uth && workq_thread_is_cooperative(uth)) {
4099*4f1223e8SApple OSS Distributions _wq_cooperative_queue_scheduled_count_dec(wq, uth->uu_workq_pri.qos_req);
4100*4f1223e8SApple OSS Distributions
4101*4f1223e8SApple OSS Distributions (void) _wq_cooperative_queue_refresh_best_req_qos(wq);
4102*4f1223e8SApple OSS Distributions
4103*4f1223e8SApple OSS Distributions _wq_cooperative_queue_scheduled_count_inc(wq, uth->uu_workq_pri.qos_req);
4104*4f1223e8SApple OSS Distributions } else {
4105*4f1223e8SApple OSS Distributions /*
4106*4f1223e8SApple OSS Distributions * The old value that was already precomputed should be safe to use -
4107*4f1223e8SApple OSS Distributions * add an assert that asserts that the best req QoS doesn't change in
4108*4f1223e8SApple OSS Distributions * this case
4109*4f1223e8SApple OSS Distributions */
4110*4f1223e8SApple OSS Distributions assert(_wq_cooperative_queue_refresh_best_req_qos(wq) == false);
4111*4f1223e8SApple OSS Distributions }
4112*4f1223e8SApple OSS Distributions
4113*4f1223e8SApple OSS Distributions thread_qos_t qos = wq->wq_cooperative_queue_best_req_qos;
4114*4f1223e8SApple OSS Distributions
4115*4f1223e8SApple OSS Distributions /* There are no eligible requests in the cooperative pool */
4116*4f1223e8SApple OSS Distributions if (qos == THREAD_QOS_UNSPECIFIED) {
4117*4f1223e8SApple OSS Distributions return NULL;
4118*4f1223e8SApple OSS Distributions }
4119*4f1223e8SApple OSS Distributions assert(qos != WORKQ_THREAD_QOS_ABOVEUI);
4120*4f1223e8SApple OSS Distributions assert(qos != WORKQ_THREAD_QOS_MANAGER);
4121*4f1223e8SApple OSS Distributions
4122*4f1223e8SApple OSS Distributions uint8_t bucket = _wq_bucket(qos);
4123*4f1223e8SApple OSS Distributions assert(!STAILQ_EMPTY(&wq->wq_cooperative_queue[bucket]));
4124*4f1223e8SApple OSS Distributions
4125*4f1223e8SApple OSS Distributions return STAILQ_FIRST(&wq->wq_cooperative_queue[bucket]);
4126*4f1223e8SApple OSS Distributions }
4127*4f1223e8SApple OSS Distributions
4128*4f1223e8SApple OSS Distributions static workq_threadreq_t
workq_threadreq_select_for_creator(struct workqueue * wq)4129*4f1223e8SApple OSS Distributions workq_threadreq_select_for_creator(struct workqueue *wq)
4130*4f1223e8SApple OSS Distributions {
4131*4f1223e8SApple OSS Distributions workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
4132*4f1223e8SApple OSS Distributions thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
4133*4f1223e8SApple OSS Distributions uint8_t pri = 0;
4134*4f1223e8SApple OSS Distributions
4135*4f1223e8SApple OSS Distributions /*
4136*4f1223e8SApple OSS Distributions * Compute the best priority request, and ignore the turnstile for now
4137*4f1223e8SApple OSS Distributions */
4138*4f1223e8SApple OSS Distributions
4139*4f1223e8SApple OSS Distributions req_pri = priority_queue_max(&wq->wq_special_queue,
4140*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4141*4f1223e8SApple OSS Distributions if (req_pri) {
4142*4f1223e8SApple OSS Distributions pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
4143*4f1223e8SApple OSS Distributions &req_pri->tr_entry);
4144*4f1223e8SApple OSS Distributions }
4145*4f1223e8SApple OSS Distributions
4146*4f1223e8SApple OSS Distributions /*
4147*4f1223e8SApple OSS Distributions * Handle the manager thread request. The special queue might yield
4148*4f1223e8SApple OSS Distributions * a higher priority, but the manager always beats the QoS world.
4149*4f1223e8SApple OSS Distributions */
4150*4f1223e8SApple OSS Distributions
4151*4f1223e8SApple OSS Distributions req_mgr = wq->wq_event_manager_threadreq;
4152*4f1223e8SApple OSS Distributions if (req_mgr && workq_may_start_event_mgr_thread(wq, NULL)) {
4153*4f1223e8SApple OSS Distributions uint32_t mgr_pri = wq->wq_event_manager_priority;
4154*4f1223e8SApple OSS Distributions
4155*4f1223e8SApple OSS Distributions if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
4156*4f1223e8SApple OSS Distributions mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
4157*4f1223e8SApple OSS Distributions } else {
4158*4f1223e8SApple OSS Distributions mgr_pri = thread_workq_pri_for_qos(
4159*4f1223e8SApple OSS Distributions _pthread_priority_thread_qos(mgr_pri));
4160*4f1223e8SApple OSS Distributions }
4161*4f1223e8SApple OSS Distributions
4162*4f1223e8SApple OSS Distributions return mgr_pri >= pri ? req_mgr : req_pri;
4163*4f1223e8SApple OSS Distributions }
4164*4f1223e8SApple OSS Distributions
4165*4f1223e8SApple OSS Distributions /*
4166*4f1223e8SApple OSS Distributions * Compute the best QoS Request, and check whether it beats the "pri" one
4167*4f1223e8SApple OSS Distributions *
4168*4f1223e8SApple OSS Distributions * Start by comparing the overcommit and the cooperative pool
4169*4f1223e8SApple OSS Distributions */
4170*4f1223e8SApple OSS Distributions req_qos = priority_queue_max(&wq->wq_overcommit_queue,
4171*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4172*4f1223e8SApple OSS Distributions if (req_qos) {
4173*4f1223e8SApple OSS Distributions qos = req_qos->tr_qos;
4174*4f1223e8SApple OSS Distributions }
4175*4f1223e8SApple OSS Distributions
4176*4f1223e8SApple OSS Distributions req_tmp = workq_cooperative_queue_best_req(wq, NULL);
4177*4f1223e8SApple OSS Distributions if (req_tmp && qos <= req_tmp->tr_qos) {
4178*4f1223e8SApple OSS Distributions /*
4179*4f1223e8SApple OSS Distributions * Cooperative TR is better between overcommit and cooperative. Note
4180*4f1223e8SApple OSS Distributions * that if qos is same between overcommit and cooperative, we choose
4181*4f1223e8SApple OSS Distributions * cooperative.
4182*4f1223e8SApple OSS Distributions *
4183*4f1223e8SApple OSS Distributions * Pick cooperative pool if it passes the admissions check
4184*4f1223e8SApple OSS Distributions */
4185*4f1223e8SApple OSS Distributions if (workq_cooperative_allowance(wq, req_tmp->tr_qos, NULL, true)) {
4186*4f1223e8SApple OSS Distributions req_qos = req_tmp;
4187*4f1223e8SApple OSS Distributions qos = req_qos->tr_qos;
4188*4f1223e8SApple OSS Distributions }
4189*4f1223e8SApple OSS Distributions }
4190*4f1223e8SApple OSS Distributions
4191*4f1223e8SApple OSS Distributions /*
4192*4f1223e8SApple OSS Distributions * Compare the best QoS so far - either from overcommit or from cooperative
4193*4f1223e8SApple OSS Distributions * pool - and compare it with the constrained pool
4194*4f1223e8SApple OSS Distributions */
4195*4f1223e8SApple OSS Distributions req_tmp = priority_queue_max(&wq->wq_constrained_queue,
4196*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4197*4f1223e8SApple OSS Distributions
4198*4f1223e8SApple OSS Distributions if (req_tmp && qos < req_tmp->tr_qos) {
4199*4f1223e8SApple OSS Distributions /*
4200*4f1223e8SApple OSS Distributions * Constrained pool is best in QoS between overcommit, cooperative
4201*4f1223e8SApple OSS Distributions * and constrained. Now check how it fairs against the priority case
4202*4f1223e8SApple OSS Distributions */
4203*4f1223e8SApple OSS Distributions if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
4204*4f1223e8SApple OSS Distributions return req_pri;
4205*4f1223e8SApple OSS Distributions }
4206*4f1223e8SApple OSS Distributions
4207*4f1223e8SApple OSS Distributions if (workq_constrained_allowance(wq, req_tmp->tr_qos, NULL, true, true)) {
4208*4f1223e8SApple OSS Distributions /*
4209*4f1223e8SApple OSS Distributions * If the constrained thread request is the best one and passes
4210*4f1223e8SApple OSS Distributions * the admission check, pick it.
4211*4f1223e8SApple OSS Distributions */
4212*4f1223e8SApple OSS Distributions return req_tmp;
4213*4f1223e8SApple OSS Distributions }
4214*4f1223e8SApple OSS Distributions }
4215*4f1223e8SApple OSS Distributions
4216*4f1223e8SApple OSS Distributions /*
4217*4f1223e8SApple OSS Distributions * Compare the best of the QoS world with the priority
4218*4f1223e8SApple OSS Distributions */
4219*4f1223e8SApple OSS Distributions if (pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
4220*4f1223e8SApple OSS Distributions return req_pri;
4221*4f1223e8SApple OSS Distributions }
4222*4f1223e8SApple OSS Distributions
4223*4f1223e8SApple OSS Distributions if (req_qos) {
4224*4f1223e8SApple OSS Distributions return req_qos;
4225*4f1223e8SApple OSS Distributions }
4226*4f1223e8SApple OSS Distributions
4227*4f1223e8SApple OSS Distributions /*
4228*4f1223e8SApple OSS Distributions * If we had no eligible request but we have a turnstile push,
4229*4f1223e8SApple OSS Distributions * it must be a non overcommit thread request that failed
4230*4f1223e8SApple OSS Distributions * the admission check.
4231*4f1223e8SApple OSS Distributions *
4232*4f1223e8SApple OSS Distributions * Just fake a BG thread request so that if the push stops the creator
4233*4f1223e8SApple OSS Distributions * priority just drops to 4.
4234*4f1223e8SApple OSS Distributions */
4235*4f1223e8SApple OSS Distributions if (turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile, NULL)) {
4236*4f1223e8SApple OSS Distributions static struct workq_threadreq_s workq_sync_push_fake_req = {
4237*4f1223e8SApple OSS Distributions .tr_qos = THREAD_QOS_BACKGROUND,
4238*4f1223e8SApple OSS Distributions };
4239*4f1223e8SApple OSS Distributions
4240*4f1223e8SApple OSS Distributions return &workq_sync_push_fake_req;
4241*4f1223e8SApple OSS Distributions }
4242*4f1223e8SApple OSS Distributions
4243*4f1223e8SApple OSS Distributions return NULL;
4244*4f1223e8SApple OSS Distributions }
4245*4f1223e8SApple OSS Distributions
4246*4f1223e8SApple OSS Distributions /*
4247*4f1223e8SApple OSS Distributions * Returns true if this caused a change in the schedule counts of the
4248*4f1223e8SApple OSS Distributions * cooperative pool
4249*4f1223e8SApple OSS Distributions */
4250*4f1223e8SApple OSS Distributions static bool
workq_adjust_cooperative_constrained_schedule_counts(struct workqueue *wq,
    struct uthread *uth, thread_qos_t old_thread_qos, workq_tr_flags_t tr_flags)
{
	/* Caller must hold the workqueue lock: we mutate wq scheduling counts. */
	workq_lock_held(wq);

	/*
	 * Row: thread type
	 * Column: Request type
	 *
	 *                      overcommit      non-overcommit  cooperative
	 *      overcommit      X               case 1          case 2
	 *      cooperative     case 3          case 4          case 5
	 *      non-overcommit  case 6          X               case 7
	 *
	 * Move the thread to the right bucket depending on what state it currently
	 * has and what state the thread req it picks, is going to have.
	 *
	 * Note that the creator thread is an overcommit thread.
	 */

	/*
	 * QoS the thread will run at after picking up this request (its requested
	 * QoS); the cooperative per-QoS counts are keyed by it. old_thread_qos is
	 * the QoS under which the thread was previously accounted.
	 */
	thread_qos_t new_thread_qos = uth->uu_workq_pri.qos_req;

	/*
	 * Anytime a cooperative bucket's schedule count changes, we need to
	 * potentially refresh the next best QoS for that pool when we determine
	 * the next request for the creator
	 */
	bool cooperative_pool_sched_count_changed = false;

	if (workq_thread_is_overcommit(uth)) {
		if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 1: thread is overcommit, req is non-overcommit
			wq->wq_constrained_threads_scheduled++;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 2: thread is overcommit, req is cooperative
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
		/* overcommit -> overcommit: no count moves. */
	} else if (workq_thread_is_cooperative(uth)) {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 3: thread is cooperative, req is overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
		} else if (workq_tr_is_nonovercommit(tr_flags)) {
			// Case 4: thread is cooperative, req is non-overcommit
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			wq->wq_constrained_threads_scheduled++;
		} else {
			// Case 5: thread is cooperative, req is also cooperative
			assert(workq_tr_is_cooperative(tr_flags));
			/* Same pool, possibly different QoS bucket: move the count. */
			_wq_cooperative_queue_scheduled_count_dec(wq, old_thread_qos);
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
		}
		/* Thread was cooperative, so a cooperative bucket count changed in
		 * every one of cases 3-5. */
		cooperative_pool_sched_count_changed = true;
	} else {
		if (workq_tr_is_overcommit(tr_flags)) {
			// Case 6: Thread is non-overcommit, req is overcommit
			wq->wq_constrained_threads_scheduled--;
		} else if (workq_tr_is_cooperative(tr_flags)) {
			// Case 7: Thread is non-overcommit, req is cooperative
			wq->wq_constrained_threads_scheduled--;
			_wq_cooperative_queue_scheduled_count_inc(wq, new_thread_qos);
			cooperative_pool_sched_count_changed = true;
		}
		/* non-overcommit -> non-overcommit is the X cell: nothing to do. */
	}

	/* True iff a cooperative bucket's scheduled count changed; the caller
	 * uses this to decide whether the cooperative pool's best-request QoS
	 * needs to be refreshed. */
	return cooperative_pool_sched_count_changed;
}
4317*4f1223e8SApple OSS Distributions
4318*4f1223e8SApple OSS Distributions static workq_threadreq_t
workq_threadreq_select(struct workqueue * wq,struct uthread * uth)4319*4f1223e8SApple OSS Distributions workq_threadreq_select(struct workqueue *wq, struct uthread *uth)
4320*4f1223e8SApple OSS Distributions {
4321*4f1223e8SApple OSS Distributions workq_threadreq_t req_qos, req_pri, req_tmp, req_mgr;
4322*4f1223e8SApple OSS Distributions uintptr_t proprietor;
4323*4f1223e8SApple OSS Distributions thread_qos_t qos = THREAD_QOS_UNSPECIFIED;
4324*4f1223e8SApple OSS Distributions uint8_t pri = 0;
4325*4f1223e8SApple OSS Distributions
4326*4f1223e8SApple OSS Distributions if (uth == wq->wq_creator) {
4327*4f1223e8SApple OSS Distributions uth = NULL;
4328*4f1223e8SApple OSS Distributions }
4329*4f1223e8SApple OSS Distributions
4330*4f1223e8SApple OSS Distributions /*
4331*4f1223e8SApple OSS Distributions * Compute the best priority request (special or turnstile)
4332*4f1223e8SApple OSS Distributions */
4333*4f1223e8SApple OSS Distributions
4334*4f1223e8SApple OSS Distributions pri = (uint8_t)turnstile_workq_proprietor_of_max_turnstile(wq->wq_turnstile,
4335*4f1223e8SApple OSS Distributions &proprietor);
4336*4f1223e8SApple OSS Distributions if (pri) {
4337*4f1223e8SApple OSS Distributions struct kqworkloop *kqwl = (struct kqworkloop *)proprietor;
4338*4f1223e8SApple OSS Distributions req_pri = &kqwl->kqwl_request;
4339*4f1223e8SApple OSS Distributions if (req_pri->tr_state != WORKQ_TR_STATE_QUEUED) {
4340*4f1223e8SApple OSS Distributions panic("Invalid thread request (%p) state %d",
4341*4f1223e8SApple OSS Distributions req_pri, req_pri->tr_state);
4342*4f1223e8SApple OSS Distributions }
4343*4f1223e8SApple OSS Distributions } else {
4344*4f1223e8SApple OSS Distributions req_pri = NULL;
4345*4f1223e8SApple OSS Distributions }
4346*4f1223e8SApple OSS Distributions
4347*4f1223e8SApple OSS Distributions req_tmp = priority_queue_max(&wq->wq_special_queue,
4348*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4349*4f1223e8SApple OSS Distributions if (req_tmp && pri < priority_queue_entry_sched_pri(&wq->wq_special_queue,
4350*4f1223e8SApple OSS Distributions &req_tmp->tr_entry)) {
4351*4f1223e8SApple OSS Distributions req_pri = req_tmp;
4352*4f1223e8SApple OSS Distributions pri = (uint8_t)priority_queue_entry_sched_pri(&wq->wq_special_queue,
4353*4f1223e8SApple OSS Distributions &req_tmp->tr_entry);
4354*4f1223e8SApple OSS Distributions }
4355*4f1223e8SApple OSS Distributions
4356*4f1223e8SApple OSS Distributions /*
4357*4f1223e8SApple OSS Distributions * Handle the manager thread request. The special queue might yield
4358*4f1223e8SApple OSS Distributions * a higher priority, but the manager always beats the QoS world.
4359*4f1223e8SApple OSS Distributions */
4360*4f1223e8SApple OSS Distributions
4361*4f1223e8SApple OSS Distributions req_mgr = wq->wq_event_manager_threadreq;
4362*4f1223e8SApple OSS Distributions if (req_mgr && workq_may_start_event_mgr_thread(wq, uth)) {
4363*4f1223e8SApple OSS Distributions uint32_t mgr_pri = wq->wq_event_manager_priority;
4364*4f1223e8SApple OSS Distributions
4365*4f1223e8SApple OSS Distributions if (mgr_pri & _PTHREAD_PRIORITY_SCHED_PRI_FLAG) {
4366*4f1223e8SApple OSS Distributions mgr_pri &= _PTHREAD_PRIORITY_SCHED_PRI_MASK;
4367*4f1223e8SApple OSS Distributions } else {
4368*4f1223e8SApple OSS Distributions mgr_pri = thread_workq_pri_for_qos(
4369*4f1223e8SApple OSS Distributions _pthread_priority_thread_qos(mgr_pri));
4370*4f1223e8SApple OSS Distributions }
4371*4f1223e8SApple OSS Distributions
4372*4f1223e8SApple OSS Distributions return mgr_pri >= pri ? req_mgr : req_pri;
4373*4f1223e8SApple OSS Distributions }
4374*4f1223e8SApple OSS Distributions
4375*4f1223e8SApple OSS Distributions /*
4376*4f1223e8SApple OSS Distributions * Compute the best QoS Request, and check whether it beats the "pri" one
4377*4f1223e8SApple OSS Distributions */
4378*4f1223e8SApple OSS Distributions
4379*4f1223e8SApple OSS Distributions req_qos = priority_queue_max(&wq->wq_overcommit_queue,
4380*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4381*4f1223e8SApple OSS Distributions if (req_qos) {
4382*4f1223e8SApple OSS Distributions qos = req_qos->tr_qos;
4383*4f1223e8SApple OSS Distributions }
4384*4f1223e8SApple OSS Distributions
4385*4f1223e8SApple OSS Distributions req_tmp = workq_cooperative_queue_best_req(wq, uth);
4386*4f1223e8SApple OSS Distributions if (req_tmp && qos <= req_tmp->tr_qos) {
4387*4f1223e8SApple OSS Distributions /*
4388*4f1223e8SApple OSS Distributions * Cooperative TR is better between overcommit and cooperative. Note
4389*4f1223e8SApple OSS Distributions * that if qos is same between overcommit and cooperative, we choose
4390*4f1223e8SApple OSS Distributions * cooperative.
4391*4f1223e8SApple OSS Distributions *
4392*4f1223e8SApple OSS Distributions * Pick cooperative pool if it passes the admissions check
4393*4f1223e8SApple OSS Distributions */
4394*4f1223e8SApple OSS Distributions if (workq_cooperative_allowance(wq, req_tmp->tr_qos, uth, true)) {
4395*4f1223e8SApple OSS Distributions req_qos = req_tmp;
4396*4f1223e8SApple OSS Distributions qos = req_qos->tr_qos;
4397*4f1223e8SApple OSS Distributions }
4398*4f1223e8SApple OSS Distributions }
4399*4f1223e8SApple OSS Distributions
4400*4f1223e8SApple OSS Distributions /*
4401*4f1223e8SApple OSS Distributions * Compare the best QoS so far - either from overcommit or from cooperative
4402*4f1223e8SApple OSS Distributions * pool - and compare it with the constrained pool
4403*4f1223e8SApple OSS Distributions */
4404*4f1223e8SApple OSS Distributions req_tmp = priority_queue_max(&wq->wq_constrained_queue,
4405*4f1223e8SApple OSS Distributions struct workq_threadreq_s, tr_entry);
4406*4f1223e8SApple OSS Distributions
4407*4f1223e8SApple OSS Distributions if (req_tmp && qos < req_tmp->tr_qos) {
4408*4f1223e8SApple OSS Distributions /*
4409*4f1223e8SApple OSS Distributions * Constrained pool is best in QoS between overcommit, cooperative
4410*4f1223e8SApple OSS Distributions * and constrained. Now check how it fairs against the priority case
4411*4f1223e8SApple OSS Distributions */
4412*4f1223e8SApple OSS Distributions if (pri && pri >= thread_workq_pri_for_qos(req_tmp->tr_qos)) {
4413*4f1223e8SApple OSS Distributions return req_pri;
4414*4f1223e8SApple OSS Distributions }
4415*4f1223e8SApple OSS Distributions
4416*4f1223e8SApple OSS Distributions if (workq_constrained_allowance(wq, req_tmp->tr_qos, uth, true, true)) {
4417*4f1223e8SApple OSS Distributions /*
4418*4f1223e8SApple OSS Distributions * If the constrained thread request is the best one and passes
4419*4f1223e8SApple OSS Distributions * the admission check, pick it.
4420*4f1223e8SApple OSS Distributions */
4421*4f1223e8SApple OSS Distributions return req_tmp;
4422*4f1223e8SApple OSS Distributions }
4423*4f1223e8SApple OSS Distributions }
4424*4f1223e8SApple OSS Distributions
4425*4f1223e8SApple OSS Distributions if (req_pri && (!qos || pri >= thread_workq_pri_for_qos(qos))) {
4426*4f1223e8SApple OSS Distributions return req_pri;
4427*4f1223e8SApple OSS Distributions }
4428*4f1223e8SApple OSS Distributions
4429*4f1223e8SApple OSS Distributions return req_qos;
4430*4f1223e8SApple OSS Distributions }
4431*4f1223e8SApple OSS Distributions
4432*4f1223e8SApple OSS Distributions /*
4433*4f1223e8SApple OSS Distributions * The creator is an anonymous thread that is counted as scheduled,
4434*4f1223e8SApple OSS Distributions * but otherwise without its scheduler callback set or tracked as active
4435*4f1223e8SApple OSS Distributions * that is used to make other threads.
4436*4f1223e8SApple OSS Distributions *
4437*4f1223e8SApple OSS Distributions * When more requests are added or an existing one is hurried along,
4438*4f1223e8SApple OSS Distributions * a creator is elected and setup, or the existing one overridden accordingly.
4439*4f1223e8SApple OSS Distributions *
4440*4f1223e8SApple OSS Distributions * While this creator is in flight, because no request has been dequeued,
4441*4f1223e8SApple OSS Distributions * already running threads have a chance at stealing thread requests avoiding
4442*4f1223e8SApple OSS Distributions * useless context switches, and the creator once scheduled may not find any
4443*4f1223e8SApple OSS Distributions * work to do and will then just park again.
4444*4f1223e8SApple OSS Distributions *
4445*4f1223e8SApple OSS Distributions * The creator serves the dual purpose of informing the scheduler of work that
4446*4f1223e8SApple OSS Distributions * hasn't be materialized as threads yet, and also as a natural pacing mechanism
4447*4f1223e8SApple OSS Distributions * for thread creation.
4448*4f1223e8SApple OSS Distributions *
4449*4f1223e8SApple OSS Distributions * By being anonymous (and not bound to anything) it means that thread requests
4450*4f1223e8SApple OSS Distributions * can be stolen from this creator by threads already on core yielding more
4451*4f1223e8SApple OSS Distributions * efficient scheduling and reduced context switches.
4452*4f1223e8SApple OSS Distributions */
4453*4f1223e8SApple OSS Distributions static void
workq_schedule_creator(proc_t p,struct workqueue * wq,workq_kern_threadreq_flags_t flags)4454*4f1223e8SApple OSS Distributions workq_schedule_creator(proc_t p, struct workqueue *wq,
4455*4f1223e8SApple OSS Distributions workq_kern_threadreq_flags_t flags)
4456*4f1223e8SApple OSS Distributions {
4457*4f1223e8SApple OSS Distributions workq_threadreq_t req;
4458*4f1223e8SApple OSS Distributions struct uthread *uth;
4459*4f1223e8SApple OSS Distributions bool needs_wakeup;
4460*4f1223e8SApple OSS Distributions
4461*4f1223e8SApple OSS Distributions workq_lock_held(wq);
4462*4f1223e8SApple OSS Distributions assert(p || (flags & WORKQ_THREADREQ_CAN_CREATE_THREADS) == 0);
4463*4f1223e8SApple OSS Distributions
4464*4f1223e8SApple OSS Distributions again:
4465*4f1223e8SApple OSS Distributions uth = wq->wq_creator;
4466*4f1223e8SApple OSS Distributions
4467*4f1223e8SApple OSS Distributions if (!wq->wq_reqcount) {
4468*4f1223e8SApple OSS Distributions /*
4469*4f1223e8SApple OSS Distributions * There is no thread request left.
4470*4f1223e8SApple OSS Distributions *
4471*4f1223e8SApple OSS Distributions * If there is a creator, leave everything in place, so that it cleans
4472*4f1223e8SApple OSS Distributions * up itself in workq_push_idle_thread().
4473*4f1223e8SApple OSS Distributions *
4474*4f1223e8SApple OSS Distributions * Else, make sure the turnstile state is reset to no inheritor.
4475*4f1223e8SApple OSS Distributions */
4476*4f1223e8SApple OSS Distributions if (uth == NULL) {
4477*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
4478*4f1223e8SApple OSS Distributions }
4479*4f1223e8SApple OSS Distributions return;
4480*4f1223e8SApple OSS Distributions }
4481*4f1223e8SApple OSS Distributions
4482*4f1223e8SApple OSS Distributions req = workq_threadreq_select_for_creator(wq);
4483*4f1223e8SApple OSS Distributions if (req == NULL) {
4484*4f1223e8SApple OSS Distributions /*
4485*4f1223e8SApple OSS Distributions * There isn't a thread request that passes the admission check.
4486*4f1223e8SApple OSS Distributions *
4487*4f1223e8SApple OSS Distributions * If there is a creator, do not touch anything, the creator will sort
4488*4f1223e8SApple OSS Distributions * it out when it runs.
4489*4f1223e8SApple OSS Distributions *
4490*4f1223e8SApple OSS Distributions * Else, set the inheritor to "WORKQ" so that the turnstile propagation
4491*4f1223e8SApple OSS Distributions * code calls us if anything changes.
4492*4f1223e8SApple OSS Distributions */
4493*4f1223e8SApple OSS Distributions if (uth == NULL) {
4494*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(wq, wq, TURNSTILE_INHERITOR_WORKQ);
4495*4f1223e8SApple OSS Distributions }
4496*4f1223e8SApple OSS Distributions return;
4497*4f1223e8SApple OSS Distributions }
4498*4f1223e8SApple OSS Distributions
4499*4f1223e8SApple OSS Distributions
4500*4f1223e8SApple OSS Distributions if (uth) {
4501*4f1223e8SApple OSS Distributions /*
4502*4f1223e8SApple OSS Distributions * We need to maybe override the creator we already have
4503*4f1223e8SApple OSS Distributions */
4504*4f1223e8SApple OSS Distributions if (workq_thread_needs_priority_change(req, uth)) {
4505*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
4506*4f1223e8SApple OSS Distributions wq, 1, uthread_tid(uth), req->tr_qos);
4507*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4508*4f1223e8SApple OSS Distributions }
4509*4f1223e8SApple OSS Distributions assert(wq->wq_inheritor == get_machthread(uth));
4510*4f1223e8SApple OSS Distributions } else if (wq->wq_thidlecount) {
4511*4f1223e8SApple OSS Distributions /*
4512*4f1223e8SApple OSS Distributions * We need to unpark a creator thread
4513*4f1223e8SApple OSS Distributions */
4514*4f1223e8SApple OSS Distributions wq->wq_creator = uth = workq_pop_idle_thread(wq, UT_WORKQ_OVERCOMMIT,
4515*4f1223e8SApple OSS Distributions &needs_wakeup);
4516*4f1223e8SApple OSS Distributions /* Always reset the priorities on the newly chosen creator */
4517*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4518*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(wq, get_machthread(uth),
4519*4f1223e8SApple OSS Distributions TURNSTILE_INHERITOR_THREAD);
4520*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select | DBG_FUNC_NONE,
4521*4f1223e8SApple OSS Distributions wq, 2, uthread_tid(uth), req->tr_qos);
4522*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
4523*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields = 0;
4524*4f1223e8SApple OSS Distributions if (needs_wakeup) {
4525*4f1223e8SApple OSS Distributions workq_thread_wakeup(uth);
4526*4f1223e8SApple OSS Distributions }
4527*4f1223e8SApple OSS Distributions } else {
4528*4f1223e8SApple OSS Distributions /*
4529*4f1223e8SApple OSS Distributions * We need to allocate a thread...
4530*4f1223e8SApple OSS Distributions */
4531*4f1223e8SApple OSS Distributions if (__improbable(wq->wq_nthreads >= wq_max_threads)) {
4532*4f1223e8SApple OSS Distributions /* out of threads, just go away */
4533*4f1223e8SApple OSS Distributions flags = WORKQ_THREADREQ_NONE;
4534*4f1223e8SApple OSS Distributions } else if (flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) {
4535*4f1223e8SApple OSS Distributions act_set_astkevent(current_thread(), AST_KEVENT_REDRIVE_THREADREQ);
4536*4f1223e8SApple OSS Distributions } else if (!(flags & WORKQ_THREADREQ_CAN_CREATE_THREADS)) {
4537*4f1223e8SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
4538*4f1223e8SApple OSS Distributions workq_schedule_immediate_thread_creation(wq);
4539*4f1223e8SApple OSS Distributions } else if ((workq_add_new_idle_thread(p, wq,
4540*4f1223e8SApple OSS Distributions workq_unpark_continue, false, NULL) == KERN_SUCCESS)) {
4541*4f1223e8SApple OSS Distributions goto again;
4542*4f1223e8SApple OSS Distributions } else {
4543*4f1223e8SApple OSS Distributions workq_schedule_delayed_thread_creation(wq, 0);
4544*4f1223e8SApple OSS Distributions }
4545*4f1223e8SApple OSS Distributions
4546*4f1223e8SApple OSS Distributions /*
4547*4f1223e8SApple OSS Distributions * If the current thread is the inheritor:
4548*4f1223e8SApple OSS Distributions *
4549*4f1223e8SApple OSS Distributions * If we set the AST, then the thread will stay the inheritor until
4550*4f1223e8SApple OSS Distributions * either the AST calls workq_kern_threadreq_redrive(), or it parks
4551*4f1223e8SApple OSS Distributions * and calls workq_push_idle_thread().
4552*4f1223e8SApple OSS Distributions *
4553*4f1223e8SApple OSS Distributions * Else, the responsibility of the thread creation is with a thread-call
4554*4f1223e8SApple OSS Distributions * and we need to clear the inheritor.
4555*4f1223e8SApple OSS Distributions */
4556*4f1223e8SApple OSS Distributions if ((flags & WORKQ_THREADREQ_SET_AST_ON_FAILURE) == 0 &&
4557*4f1223e8SApple OSS Distributions wq->wq_inheritor == current_thread()) {
4558*4f1223e8SApple OSS Distributions workq_turnstile_update_inheritor(wq, TURNSTILE_INHERITOR_NULL, 0);
4559*4f1223e8SApple OSS Distributions }
4560*4f1223e8SApple OSS Distributions }
4561*4f1223e8SApple OSS Distributions }
4562*4f1223e8SApple OSS Distributions
4563*4f1223e8SApple OSS Distributions /**
4564*4f1223e8SApple OSS Distributions * Same as workq_unpark_select_threadreq_or_park_and_unlock,
4565*4f1223e8SApple OSS Distributions * but do not allow early binds.
4566*4f1223e8SApple OSS Distributions *
4567*4f1223e8SApple OSS Distributions * Called with the base pri frozen, will unfreeze it.
4568*4f1223e8SApple OSS Distributions */
4569*4f1223e8SApple OSS Distributions __attribute__((noreturn, noinline))
4570*4f1223e8SApple OSS Distributions static void
workq_select_threadreq_or_park_and_unlock(proc_t p,struct workqueue * wq,struct uthread * uth,uint32_t setup_flags)4571*4f1223e8SApple OSS Distributions workq_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
4572*4f1223e8SApple OSS Distributions struct uthread *uth, uint32_t setup_flags)
4573*4f1223e8SApple OSS Distributions {
4574*4f1223e8SApple OSS Distributions workq_threadreq_t req = NULL;
4575*4f1223e8SApple OSS Distributions bool is_creator = (wq->wq_creator == uth);
4576*4f1223e8SApple OSS Distributions bool schedule_creator = false;
4577*4f1223e8SApple OSS Distributions
4578*4f1223e8SApple OSS Distributions if (__improbable(_wq_exiting(wq))) {
4579*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 0, 0, 0);
4580*4f1223e8SApple OSS Distributions goto park;
4581*4f1223e8SApple OSS Distributions }
4582*4f1223e8SApple OSS Distributions
4583*4f1223e8SApple OSS Distributions if (wq->wq_reqcount == 0) {
4584*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 1, 0, 0);
4585*4f1223e8SApple OSS Distributions goto park;
4586*4f1223e8SApple OSS Distributions }
4587*4f1223e8SApple OSS Distributions
4588*4f1223e8SApple OSS Distributions req = workq_threadreq_select(wq, uth);
4589*4f1223e8SApple OSS Distributions if (__improbable(req == NULL)) {
4590*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 2, 0, 0);
4591*4f1223e8SApple OSS Distributions goto park;
4592*4f1223e8SApple OSS Distributions }
4593*4f1223e8SApple OSS Distributions
4594*4f1223e8SApple OSS Distributions struct uu_workq_policy old_pri = uth->uu_workq_pri;
4595*4f1223e8SApple OSS Distributions uint8_t tr_flags = req->tr_flags;
4596*4f1223e8SApple OSS Distributions struct turnstile *req_ts = kqueue_threadreq_get_turnstile(req);
4597*4f1223e8SApple OSS Distributions
4598*4f1223e8SApple OSS Distributions /*
4599*4f1223e8SApple OSS Distributions * Attempt to setup ourselves as the new thing to run, moving all priority
4600*4f1223e8SApple OSS Distributions * pushes to ourselves.
4601*4f1223e8SApple OSS Distributions *
4602*4f1223e8SApple OSS Distributions * If the current thread is the creator, then the fact that we are presently
4603*4f1223e8SApple OSS Distributions * running is proof that we'll do something useful, so keep going.
4604*4f1223e8SApple OSS Distributions *
4605*4f1223e8SApple OSS Distributions * For other cases, peek at the AST to know whether the scheduler wants
4606*4f1223e8SApple OSS Distributions * to preempt us, if yes, park instead, and move the thread request
4607*4f1223e8SApple OSS Distributions * turnstile back to the workqueue.
4608*4f1223e8SApple OSS Distributions */
4609*4f1223e8SApple OSS Distributions if (req_ts) {
4610*4f1223e8SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
4611*4f1223e8SApple OSS Distributions turnstile_update_inheritor(req_ts, get_machthread(uth),
4612*4f1223e8SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_THREAD);
4613*4f1223e8SApple OSS Distributions turnstile_update_inheritor_complete(req_ts,
4614*4f1223e8SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
4615*4f1223e8SApple OSS Distributions });
4616*4f1223e8SApple OSS Distributions }
4617*4f1223e8SApple OSS Distributions
4618*4f1223e8SApple OSS Distributions /* accounting changes of aggregate thscheduled_count and thactive which has
4619*4f1223e8SApple OSS Distributions * to be paired with the workq_thread_reset_pri below so that we have
4620*4f1223e8SApple OSS Distributions * uth->uu_workq_pri match with thactive.
4621*4f1223e8SApple OSS Distributions *
4622*4f1223e8SApple OSS Distributions * This is undone when the thread parks */
4623*4f1223e8SApple OSS Distributions if (is_creator) {
4624*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_select, wq, 4, 0,
4625*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.yields);
4626*4f1223e8SApple OSS Distributions wq->wq_creator = NULL;
4627*4f1223e8SApple OSS Distributions _wq_thactive_inc(wq, req->tr_qos);
4628*4f1223e8SApple OSS Distributions wq->wq_thscheduled_count[_wq_bucket(req->tr_qos)]++;
4629*4f1223e8SApple OSS Distributions } else if (old_pri.qos_bucket != req->tr_qos) {
4630*4f1223e8SApple OSS Distributions _wq_thactive_move(wq, old_pri.qos_bucket, req->tr_qos);
4631*4f1223e8SApple OSS Distributions }
4632*4f1223e8SApple OSS Distributions workq_thread_reset_pri(wq, uth, req, /*unpark*/ true);
4633*4f1223e8SApple OSS Distributions
4634*4f1223e8SApple OSS Distributions /*
4635*4f1223e8SApple OSS Distributions * Make relevant accounting changes for pool specific counts.
4636*4f1223e8SApple OSS Distributions *
4637*4f1223e8SApple OSS Distributions * The schedule counts changing can affect what the next best request
4638*4f1223e8SApple OSS Distributions * for cooperative thread pool is if this request is dequeued.
4639*4f1223e8SApple OSS Distributions */
4640*4f1223e8SApple OSS Distributions bool cooperative_sched_count_changed =
4641*4f1223e8SApple OSS Distributions workq_adjust_cooperative_constrained_schedule_counts(wq, uth,
4642*4f1223e8SApple OSS Distributions old_pri.qos_req, tr_flags);
4643*4f1223e8SApple OSS Distributions
4644*4f1223e8SApple OSS Distributions if (workq_tr_is_overcommit(tr_flags)) {
4645*4f1223e8SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
4646*4f1223e8SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4647*4f1223e8SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_COOPERATIVE);
4648*4f1223e8SApple OSS Distributions } else {
4649*4f1223e8SApple OSS Distributions workq_thread_set_type(uth, 0);
4650*4f1223e8SApple OSS Distributions }
4651*4f1223e8SApple OSS Distributions
4652*4f1223e8SApple OSS Distributions if (__improbable(thread_unfreeze_base_pri(get_machthread(uth)) && !is_creator)) {
4653*4f1223e8SApple OSS Distributions if (req_ts) {
4654*4f1223e8SApple OSS Distributions workq_perform_turnstile_operation_locked(wq, ^{
4655*4f1223e8SApple OSS Distributions turnstile_update_inheritor(req_ts, wq->wq_turnstile,
4656*4f1223e8SApple OSS Distributions TURNSTILE_IMMEDIATE_UPDATE | TURNSTILE_INHERITOR_TURNSTILE);
4657*4f1223e8SApple OSS Distributions turnstile_update_inheritor_complete(req_ts,
4658*4f1223e8SApple OSS Distributions TURNSTILE_INTERLOCK_HELD);
4659*4f1223e8SApple OSS Distributions });
4660*4f1223e8SApple OSS Distributions }
4661*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_select_threadreq | DBG_FUNC_NONE, wq, 3, 0, 0);
4662*4f1223e8SApple OSS Distributions
4663*4f1223e8SApple OSS Distributions /*
4664*4f1223e8SApple OSS Distributions * If a cooperative thread was the one which picked up the manager
4665*4f1223e8SApple OSS Distributions * thread request, we need to reevaluate the cooperative pool before
4666*4f1223e8SApple OSS Distributions * it goes and parks.
4667*4f1223e8SApple OSS Distributions *
4668*4f1223e8SApple OSS Distributions * For every other of thread request that it picks up, the logic in
4669*4f1223e8SApple OSS Distributions * workq_threadreq_select should have done this refresh.
4670*4f1223e8SApple OSS Distributions * See workq_push_idle_thread.
4671*4f1223e8SApple OSS Distributions */
4672*4f1223e8SApple OSS Distributions if (cooperative_sched_count_changed) {
4673*4f1223e8SApple OSS Distributions if (req->tr_qos == WORKQ_THREAD_QOS_MANAGER) {
4674*4f1223e8SApple OSS Distributions _wq_cooperative_queue_refresh_best_req_qos(wq);
4675*4f1223e8SApple OSS Distributions }
4676*4f1223e8SApple OSS Distributions }
4677*4f1223e8SApple OSS Distributions goto park_thawed;
4678*4f1223e8SApple OSS Distributions }
4679*4f1223e8SApple OSS Distributions
4680*4f1223e8SApple OSS Distributions /*
4681*4f1223e8SApple OSS Distributions * We passed all checks, dequeue the request, bind to it, and set it up
4682*4f1223e8SApple OSS Distributions * to return to user.
4683*4f1223e8SApple OSS Distributions */
4684*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
4685*4f1223e8SApple OSS Distributions workq_trace_req_id(req), tr_flags, 0);
4686*4f1223e8SApple OSS Distributions wq->wq_fulfilled++;
4687*4f1223e8SApple OSS Distributions schedule_creator = workq_threadreq_dequeue(wq, req,
4688*4f1223e8SApple OSS Distributions cooperative_sched_count_changed);
4689*4f1223e8SApple OSS Distributions
4690*4f1223e8SApple OSS Distributions workq_thread_reset_cpupercent(req, uth);
4691*4f1223e8SApple OSS Distributions
4692*4f1223e8SApple OSS Distributions if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4693*4f1223e8SApple OSS Distributions kqueue_threadreq_bind_prepost(p, req, uth);
4694*4f1223e8SApple OSS Distributions req = NULL;
4695*4f1223e8SApple OSS Distributions } else if (req->tr_count > 0) {
4696*4f1223e8SApple OSS Distributions req = NULL;
4697*4f1223e8SApple OSS Distributions }
4698*4f1223e8SApple OSS Distributions
4699*4f1223e8SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_NEW) {
4700*4f1223e8SApple OSS Distributions uth->uu_workq_flags ^= UT_WORKQ_NEW;
4701*4f1223e8SApple OSS Distributions setup_flags |= WQ_SETUP_FIRST_USE;
4702*4f1223e8SApple OSS Distributions }
4703*4f1223e8SApple OSS Distributions
4704*4f1223e8SApple OSS Distributions /* If one of the following is true, call workq_schedule_creator (which also
4705*4f1223e8SApple OSS Distributions * adjusts priority of existing creator):
4706*4f1223e8SApple OSS Distributions *
4707*4f1223e8SApple OSS Distributions * - We are the creator currently so the wq may need a new creator
4708*4f1223e8SApple OSS Distributions * - The request we're binding to is the highest priority one, existing
4709*4f1223e8SApple OSS Distributions * creator's priority might need to be adjusted to reflect the next
4710*4f1223e8SApple OSS Distributions * highest TR
4711*4f1223e8SApple OSS Distributions */
4712*4f1223e8SApple OSS Distributions if (is_creator || schedule_creator) {
4713*4f1223e8SApple OSS Distributions /* This can drop the workqueue lock, and take it again */
4714*4f1223e8SApple OSS Distributions workq_schedule_creator(p, wq, WORKQ_THREADREQ_CAN_CREATE_THREADS);
4715*4f1223e8SApple OSS Distributions }
4716*4f1223e8SApple OSS Distributions
4717*4f1223e8SApple OSS Distributions workq_unlock(wq);
4718*4f1223e8SApple OSS Distributions
4719*4f1223e8SApple OSS Distributions if (req) {
4720*4f1223e8SApple OSS Distributions zfree(workq_zone_threadreq, req);
4721*4f1223e8SApple OSS Distributions }
4722*4f1223e8SApple OSS Distributions
4723*4f1223e8SApple OSS Distributions /*
4724*4f1223e8SApple OSS Distributions * Run Thread, Run!
4725*4f1223e8SApple OSS Distributions */
4726*4f1223e8SApple OSS Distributions uint32_t upcall_flags = WQ_FLAG_THREAD_NEWSPI;
4727*4f1223e8SApple OSS Distributions if (uth->uu_workq_pri.qos_bucket == WORKQ_THREAD_QOS_MANAGER) {
4728*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_EVENT_MANAGER;
4729*4f1223e8SApple OSS Distributions } else if (workq_tr_is_overcommit(tr_flags)) {
4730*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
4731*4f1223e8SApple OSS Distributions } else if (workq_tr_is_cooperative(tr_flags)) {
4732*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_COOPERATIVE;
4733*4f1223e8SApple OSS Distributions }
4734*4f1223e8SApple OSS Distributions if (tr_flags & WORKQ_TR_FLAG_KEVENT) {
4735*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_KEVENT;
4736*4f1223e8SApple OSS Distributions assert((upcall_flags & WQ_FLAG_THREAD_COOPERATIVE) == 0);
4737*4f1223e8SApple OSS Distributions }
4738*4f1223e8SApple OSS Distributions
4739*4f1223e8SApple OSS Distributions if (tr_flags & WORKQ_TR_FLAG_WORKLOOP) {
4740*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT;
4741*4f1223e8SApple OSS Distributions }
4742*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
4743*4f1223e8SApple OSS Distributions
4744*4f1223e8SApple OSS Distributions if (tr_flags & (WORKQ_TR_FLAG_KEVENT | WORKQ_TR_FLAG_WORKLOOP)) {
4745*4f1223e8SApple OSS Distributions kqueue_threadreq_bind_commit(p, get_machthread(uth));
4746*4f1223e8SApple OSS Distributions } else {
4747*4f1223e8SApple OSS Distributions #if CONFIG_PREADOPT_TG
4748*4f1223e8SApple OSS Distributions /*
4749*4f1223e8SApple OSS Distributions * The thread may have a preadopt thread group on it already because it
4750*4f1223e8SApple OSS Distributions * got tagged with it as a creator thread. So we need to make sure to
4751*4f1223e8SApple OSS Distributions * clear that since we don't have preadoption for anonymous thread
4752*4f1223e8SApple OSS Distributions * requests
4753*4f1223e8SApple OSS Distributions */
4754*4f1223e8SApple OSS Distributions thread_set_preadopt_thread_group(get_machthread(uth), NULL);
4755*4f1223e8SApple OSS Distributions #endif
4756*4f1223e8SApple OSS Distributions }
4757*4f1223e8SApple OSS Distributions
4758*4f1223e8SApple OSS Distributions workq_setup_and_run(p, uth, setup_flags);
4759*4f1223e8SApple OSS Distributions __builtin_unreachable();
4760*4f1223e8SApple OSS Distributions
4761*4f1223e8SApple OSS Distributions park:
4762*4f1223e8SApple OSS Distributions thread_unfreeze_base_pri(get_machthread(uth));
4763*4f1223e8SApple OSS Distributions park_thawed:
4764*4f1223e8SApple OSS Distributions workq_park_and_unlock(p, wq, uth, setup_flags);
4765*4f1223e8SApple OSS Distributions }
4766*4f1223e8SApple OSS Distributions
/**
 * Runs a thread request on a thread
 *
 * - if thread is THREAD_NULL, will find a thread and run the request there.
 * Otherwise, the thread must be the current thread.
 *
 * - if req is NULL, will find the highest priority request and run that. If
 * it is not NULL, it must be a threadreq object in state NEW. If it can not
 * be run immediately, it will be enqueued and moved to state QUEUED.
 *
 * Either way, the thread request object serviced will be moved to state
 * BINDING and attached to the uthread.
 *
 * Should be called with the workqueue lock held. Will drop it.
 * Should be called with the base pri not frozen.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_select_threadreq_or_park_and_unlock(proc_t p, struct workqueue *wq,
    struct uthread *uth, uint32_t setup_flags)
{
	if (uth->uu_workq_flags & UT_WORKQ_EARLY_BOUND) {
		/*
		 * Fast path: a thread request was already bound to this thread
		 * before it woke up, so skip request selection entirely and go
		 * straight to userspace setup.
		 */
		if (uth->uu_workq_flags & UT_WORKQ_NEW) {
			setup_flags |= WQ_SETUP_FIRST_USE;
		}
		uth->uu_workq_flags &= ~(UT_WORKQ_NEW | UT_WORKQ_EARLY_BOUND);
		/*
		 * This pointer is possibly freed and only used for tracing purposes.
		 */
		workq_threadreq_t req = uth->uu_save.uus_workq_park_data.thread_request;
		workq_unlock(wq);
		WQ_TRACE_WQ(TRACE_wq_thread_logical_run | DBG_FUNC_START, wq,
		    VM_KERNEL_ADDRHIDE(req), 0, 0);
		/* req may be dangling here; only its (hidden) address was traced. */
		(void)req;

		workq_setup_and_run(p, uth, setup_flags);
		__builtin_unreachable();
	}

	/*
	 * Freeze the base priority before selecting a request; the selection
	 * path expects it frozen (see workq_select_threadreq_or_park_and_unlock,
	 * which unfreezes on the park path).
	 */
	thread_freeze_base_pri(get_machthread(uth));
	workq_select_threadreq_or_park_and_unlock(p, wq, uth, setup_flags);
}
4809*4f1223e8SApple OSS Distributions
4810*4f1223e8SApple OSS Distributions static bool
workq_creator_should_yield(struct workqueue * wq,struct uthread * uth)4811*4f1223e8SApple OSS Distributions workq_creator_should_yield(struct workqueue *wq, struct uthread *uth)
4812*4f1223e8SApple OSS Distributions {
4813*4f1223e8SApple OSS Distributions thread_qos_t qos = workq_pri_override(uth->uu_workq_pri);
4814*4f1223e8SApple OSS Distributions
4815*4f1223e8SApple OSS Distributions if (qos >= THREAD_QOS_USER_INTERACTIVE) {
4816*4f1223e8SApple OSS Distributions return false;
4817*4f1223e8SApple OSS Distributions }
4818*4f1223e8SApple OSS Distributions
4819*4f1223e8SApple OSS Distributions uint32_t snapshot = uth->uu_save.uus_workq_park_data.fulfilled_snapshot;
4820*4f1223e8SApple OSS Distributions if (wq->wq_fulfilled == snapshot) {
4821*4f1223e8SApple OSS Distributions return false;
4822*4f1223e8SApple OSS Distributions }
4823*4f1223e8SApple OSS Distributions
4824*4f1223e8SApple OSS Distributions uint32_t cnt = 0, conc = wq_max_parallelism[_wq_bucket(qos)];
4825*4f1223e8SApple OSS Distributions if (wq->wq_fulfilled - snapshot > conc) {
4826*4f1223e8SApple OSS Distributions /* we fulfilled more than NCPU requests since being dispatched */
4827*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 1,
4828*4f1223e8SApple OSS Distributions wq->wq_fulfilled, snapshot);
4829*4f1223e8SApple OSS Distributions return true;
4830*4f1223e8SApple OSS Distributions }
4831*4f1223e8SApple OSS Distributions
4832*4f1223e8SApple OSS Distributions for (uint8_t i = _wq_bucket(qos); i < WORKQ_NUM_QOS_BUCKETS; i++) {
4833*4f1223e8SApple OSS Distributions cnt += wq->wq_thscheduled_count[i];
4834*4f1223e8SApple OSS Distributions }
4835*4f1223e8SApple OSS Distributions if (conc <= cnt) {
4836*4f1223e8SApple OSS Distributions /* We fulfilled requests and have more than NCPU scheduled threads */
4837*4f1223e8SApple OSS Distributions WQ_TRACE_WQ(TRACE_wq_creator_yield, wq, 2,
4838*4f1223e8SApple OSS Distributions wq->wq_fulfilled, snapshot);
4839*4f1223e8SApple OSS Distributions return true;
4840*4f1223e8SApple OSS Distributions }
4841*4f1223e8SApple OSS Distributions
4842*4f1223e8SApple OSS Distributions return false;
4843*4f1223e8SApple OSS Distributions }
4844*4f1223e8SApple OSS Distributions
/**
 * parked idle thread wakes up
 *
 * Continuation installed when a workqueue thread parks; runs with a fresh
 * stack. Decides between: yielding (creator keeping up with demand), picking
 * up work (UT_WORKQ_RUNNING), or tearing the thread down.
 */
__attribute__((noreturn, noinline))
static void
workq_unpark_continue(void *parameter __unused, wait_result_t wr __unused)
{
	thread_t th = current_thread();
	struct uthread *uth = get_bsdthread_info(th);
	proc_t p = current_proc();
	struct workqueue *wq = proc_get_wqptr_fast(p);

	workq_lock_spin(wq);

	if (wq->wq_creator == uth && workq_creator_should_yield(wq, uth)) {
		/*
		 * If the number of threads we have out are able to keep up with the
		 * demand, then we should avoid sending this creator thread to
		 * userspace.
		 */
		uth->uu_save.uus_workq_park_data.fulfilled_snapshot = wq->wq_fulfilled;
		uth->uu_save.uus_workq_park_data.yields++;
		workq_unlock(wq);
		/* Re-enter this same continuation after the yield. */
		thread_yield_with_continuation(workq_unpark_continue, NULL);
		__builtin_unreachable();
	}

	if (__probable(uth->uu_workq_flags & UT_WORKQ_RUNNING)) {
		/* Normal wakeup: go select a thread request (drops wq lock). */
		workq_unpark_select_threadreq_or_park_and_unlock(p, wq, uth, WQ_SETUP_NONE);
		__builtin_unreachable();
	}

	if (__probable(wr == THREAD_AWAKENED)) {
		/*
		 * We were set running, but for the purposes of dying.
		 */
		assert(uth->uu_workq_flags & UT_WORKQ_DYING);
		assert((uth->uu_workq_flags & UT_WORKQ_NEW) == 0);
	} else {
		/*
		 * workaround for <rdar://problem/38647347>,
		 * in case we do hit userspace, make sure calling
		 * workq_thread_terminate() does the right thing here,
		 * and if we never call it, that workq_exit() will too because it sees
		 * this thread on the runlist.
		 */
		assert(wr == THREAD_INTERRUPTED);
		wq->wq_thdying_count++;
		uth->uu_workq_flags |= UT_WORKQ_DYING;
	}

	/* Either way the thread is now dying; drops the wq lock and never returns. */
	workq_unpark_for_death_and_unlock(p, wq, uth,
	    WORKQ_UNPARK_FOR_DEATH_WAS_IDLE, WQ_SETUP_NONE);
	__builtin_unreachable();
}
4900*4f1223e8SApple OSS Distributions
/**
 * Final kernel-side setup before a workqueue thread returns to userspace:
 * resets the voucher if requested, computes upcall flags, materializes the
 * thread's pinned port on first use, arms/disarms the workqueue quantum,
 * and hands off to pthread's workq_setup_thread. Never returns.
 *
 * Must be called without the workqueue lock held.
 */
__attribute__((noreturn, noinline))
static void
workq_setup_and_run(proc_t p, struct uthread *uth, int setup_flags)
{
	thread_t th = get_machthread(uth);
	vm_map_t vmap = get_task_map(proc_task(p));

	if (setup_flags & WQ_SETUP_CLEAR_VOUCHER) {
		/*
		 * For preemption reasons, we want to reset the voucher as late as
		 * possible, so we do it in two places:
		 *   - Just before parking (i.e. in workq_park_and_unlock())
		 *   - Prior to doing the setup for the next workitem (i.e. here)
		 *
		 * Those two places are sufficient to ensure we always reset it before
		 * it goes back out to user space, but be careful to not break that
		 * guarantee.
		 *
		 * Note that setting the voucher to NULL will not clear the preadoption
		 * thread group on this thread
		 */
		__assert_only kern_return_t kr;
		kr = thread_set_voucher_name(MACH_PORT_NULL);
		assert(kr == KERN_SUCCESS);
	}

	uint32_t upcall_flags = uth->uu_save.uus_workq_park_data.upcall_flags;
	if (!(setup_flags & WQ_SETUP_FIRST_USE)) {
		/* Tell userspace this is a recycled thread, not a brand-new one. */
		upcall_flags |= WQ_FLAG_THREAD_REUSE;
	}

	if (uth->uu_workq_flags & UT_WORKQ_OUTSIDE_QOS) {
		/*
		 * For threads that have an outside-of-QoS thread priority, indicate
		 * to userspace that setting QoS should only affect the TSD and not
		 * change QOS in the kernel.
		 */
		upcall_flags |= WQ_FLAG_THREAD_OUTSIDEQOS;
	} else {
		/*
		 * Put the QoS class value into the lower bits of the reuse_thread
		 * register, this is where the thread priority used to be stored
		 * anyway.
		 */
		upcall_flags |= uth->uu_save.uus_workq_park_data.qos |
		    WQ_FLAG_THREAD_PRIO_QOS;
	}

	if (uth->uu_workq_thport == MACH_PORT_NULL) {
		/* convert_thread_to_port_pinned() consumes a reference */
		thread_reference(th);
		/* Convert to immovable/pinned thread port, but port is not pinned yet */
		ipc_port_t port = convert_thread_to_port_pinned(th);
		/* Atomically, pin and copy out the port */
		uth->uu_workq_thport = ipc_port_copyout_send_pinned(port, get_task_ipcspace(proc_task(p)));
	}

	/* Thread has been set up to run, arm its next workqueue quantum or disarm
	 * if it is no longer supporting that */
	if (thread_supports_cooperative_workqueue(th)) {
		thread_arm_workqueue_quantum(th);
	} else {
		thread_disarm_workqueue_quantum(th);
	}

	/*
	 * Call out to pthread, this sets up the thread, pulls in kevent structs
	 * onto the stack, sets up the thread state and then returns to userspace.
	 */
	WQ_TRACE_WQ(TRACE_wq_runthread | DBG_FUNC_START,
	    proc_get_wqptr_fast(p), 0, 0, 0);

	/*
	 * Cooperative and permanently-bound threads do not participate in the
	 * blocked/unblocked sched callback machinery.
	 */
	if (workq_thread_is_cooperative(uth) || workq_thread_is_permanently_bound(uth)) {
		thread_sched_call(th, NULL);
	} else {
		thread_sched_call(th, workq_sched_callback);
	}

	pthread_functions->workq_setup_thread(p, th, vmap, uth->uu_workq_stackaddr,
	    uth->uu_workq_thport, 0, setup_flags, upcall_flags);

	__builtin_unreachable();
}
4984*4f1223e8SApple OSS Distributions
4985*4f1223e8SApple OSS Distributions /**
4986*4f1223e8SApple OSS Distributions * A wrapper around workq_setup_and_run for permanently bound thread.
4987*4f1223e8SApple OSS Distributions */
4988*4f1223e8SApple OSS Distributions __attribute__((noreturn, noinline))
4989*4f1223e8SApple OSS Distributions static void
workq_bound_thread_setup_and_run(struct uthread * uth,int setup_flags)4990*4f1223e8SApple OSS Distributions workq_bound_thread_setup_and_run(struct uthread *uth, int setup_flags)
4991*4f1223e8SApple OSS Distributions {
4992*4f1223e8SApple OSS Distributions struct workq_threadreq_s * kqr = uth->uu_kqr_bound;
4993*4f1223e8SApple OSS Distributions
4994*4f1223e8SApple OSS Distributions uint32_t upcall_flags = (WQ_FLAG_THREAD_NEWSPI |
4995*4f1223e8SApple OSS Distributions WQ_FLAG_THREAD_WORKLOOP | WQ_FLAG_THREAD_KEVENT);
4996*4f1223e8SApple OSS Distributions if (workq_tr_is_overcommit(kqr->tr_flags)) {
4997*4f1223e8SApple OSS Distributions workq_thread_set_type(uth, UT_WORKQ_OVERCOMMIT);
4998*4f1223e8SApple OSS Distributions upcall_flags |= WQ_FLAG_THREAD_OVERCOMMIT;
4999*4f1223e8SApple OSS Distributions }
5000*4f1223e8SApple OSS Distributions uth->uu_save.uus_workq_park_data.upcall_flags = upcall_flags;
5001*4f1223e8SApple OSS Distributions workq_setup_and_run(current_proc(), uth, setup_flags);
5002*4f1223e8SApple OSS Distributions __builtin_unreachable();
5003*4f1223e8SApple OSS Distributions }
5004*4f1223e8SApple OSS Distributions
/**
 * A parked bound thread wakes up for the first time.
 *
 * Three possible outcomes: run the first work item (UT_WORKQ_RUNNING),
 * terminate because the bound kqworkloop is being destroyed
 * (UT_WORKQ_DYING), or terminate because the process is exiting
 * (THREAD_INTERRUPTED).
 */
__attribute__((noreturn, noinline))
static void
workq_bound_thread_initialize_and_unpark_continue(void *parameter __unused,
    wait_result_t wr)
{
	/*
	 * Locking model for accessing uu_workq_flags :
	 *
	 * The concurrent access to uu_workq_flags is synchronized with workq lock
	 * until a thread gets permanently bound to a kqwl. Post that, kqlock
	 * is used for subsequent synchronizations. This gives us a significant
	 * benefit by avoiding having to take a process wide workq lock on every
	 * wakeup of the bound thread.
	 * This flip in locking model is tracked with UT_WORKQ_PERMANENT_BIND flag.
	 *
	 * There is one more optimization we can perform for when the thread is
	 * awakened for running (i.e THREAD_AWAKENED) until it parks.
	 * During this window, we know KQ_SLEEP bit is reset so there should not
	 * be any concurrent attempts to modify uu_workq_flags by
	 * kqworkloop_bound_thread_wakeup because the thread is already "awake".
	 * So we can safely access uu_workq_flags within this window without having
	 * to take kqlock. This KQ_SLEEP is later set by the bound thread under
	 * kqlock on its way to parking.
	 */
	struct uthread *uth = get_bsdthread_info(current_thread());

	if (__probable(wr == THREAD_AWAKENED)) {
		/* At most one flag. */
		assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING))
		    != (UT_WORKQ_RUNNING | UT_WORKQ_DYING));

		assert(workq_thread_is_permanently_bound(uth));

		if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
			/* First use: clear the NEW marker before heading to userspace. */
			assert(uth->uu_workq_flags & UT_WORKQ_NEW);
			uth->uu_workq_flags &= ~UT_WORKQ_NEW;

			struct workq_threadreq_s * kqr = uth->uu_kqr_bound;
			if (kqr->tr_work_interval) {
				kern_return_t kr;
				kr = kern_work_interval_explicit_join(get_machthread(uth),
				    kqr->tr_work_interval);
				/*
				 * The work interval functions require being called on the
				 * current thread. If we fail here, we record the fact and
				 * continue.
				 * In the future, we can preflight checking that this join will
				 * always be successful when the paired kqwl is configured; but,
				 * for now, this should be a rare case (e.g. if you have passed
				 * invalid arguments to the join).
				 */
				if (kr == KERN_SUCCESS) {
					uth->uu_workq_flags |= UT_WORKQ_WORK_INTERVAL_JOINED;
					/* Thread and kqwl both have +1 ref on the work interval. */
				} else {
					uth->uu_workq_flags |= UT_WORKQ_WORK_INTERVAL_FAILED;
				}
			}
			workq_thread_reset_cpupercent(kqr, uth);
			workq_bound_thread_setup_and_run(uth, WQ_SETUP_FIRST_USE);
			__builtin_unreachable();
		} else {
			/*
			 * The permanently bound kqworkloop is getting destroyed so we
			 * are woken up to cleanly unbind ourselves from it and terminate.
			 * See KQ_WORKLOOP_DESTROY -> workq_kern_bound_thread_wakeup.
			 *
			 * The actual full unbind happens from
			 * uthread_cleanup -> kqueue_threadreq_unbind.
			 */
			assert(uth->uu_workq_flags & UT_WORKQ_DYING);
		}
	} else {
		/*
		 * The process is getting terminated so we are woken up to die.
		 * E.g. SIGKILL'd.
		 */
		assert(wr == THREAD_INTERRUPTED);
		/*
		 * It is possible we started running as the process is aborted
		 * due to termination; but, workq_kern_threadreq_permanent_bind
		 * has not had a chance to bind us to the kqwl yet.
		 *
		 * We synchronize with it using workq lock.
		 */
		proc_t p = current_proc();
		struct workqueue *wq = proc_get_wqptr_fast(p);
		workq_lock_spin(wq);
		assert(workq_thread_is_permanently_bound(uth));
		workq_unlock(wq);

		/*
		 * We do the bind commit ourselves if workq_kern_threadreq_permanent_bind
		 * has not done it for us yet so our state is aligned with what the
		 * termination path below expects.
		 */
		kqueue_threadreq_bind_commit(p, get_machthread(uth));
	}
	workq_kern_bound_thread_terminate(uth->uu_kqr_bound);
	__builtin_unreachable();
}
5109*4f1223e8SApple OSS Distributions
5110*4f1223e8SApple OSS Distributions /**
5111*4f1223e8SApple OSS Distributions * A parked bound thread wakes up. Not the first time.
5112*4f1223e8SApple OSS Distributions */
5113*4f1223e8SApple OSS Distributions __attribute__((noreturn, noinline))
5114*4f1223e8SApple OSS Distributions static void
workq_bound_thread_unpark_continue(void * parameter __unused,wait_result_t wr)5115*4f1223e8SApple OSS Distributions workq_bound_thread_unpark_continue(void *parameter __unused, wait_result_t wr)
5116*4f1223e8SApple OSS Distributions {
5117*4f1223e8SApple OSS Distributions struct uthread *uth = get_bsdthread_info(current_thread());
5118*4f1223e8SApple OSS Distributions assert(workq_thread_is_permanently_bound(uth));
5119*4f1223e8SApple OSS Distributions
5120*4f1223e8SApple OSS Distributions if (__probable(wr == THREAD_AWAKENED)) {
5121*4f1223e8SApple OSS Distributions /* At most one flag. */
5122*4f1223e8SApple OSS Distributions assert((uth->uu_workq_flags & (UT_WORKQ_RUNNING | UT_WORKQ_DYING))
5123*4f1223e8SApple OSS Distributions != (UT_WORKQ_RUNNING | UT_WORKQ_DYING));
5124*4f1223e8SApple OSS Distributions if (uth->uu_workq_flags & UT_WORKQ_RUNNING) {
5125*4f1223e8SApple OSS Distributions workq_bound_thread_setup_and_run(uth, WQ_SETUP_NONE);
5126*4f1223e8SApple OSS Distributions } else {
5127*4f1223e8SApple OSS Distributions assert(uth->uu_workq_flags & UT_WORKQ_DYING);
5128*4f1223e8SApple OSS Distributions }
5129*4f1223e8SApple OSS Distributions } else {
5130*4f1223e8SApple OSS Distributions assert(wr == THREAD_INTERRUPTED);
5131*4f1223e8SApple OSS Distributions }
5132*4f1223e8SApple OSS Distributions workq_kern_bound_thread_terminate(uth->uu_kqr_bound);
5133*4f1223e8SApple OSS Distributions __builtin_unreachable();
5134*4f1223e8SApple OSS Distributions }
5135*4f1223e8SApple OSS Distributions
5136*4f1223e8SApple OSS Distributions #pragma mark misc
5137*4f1223e8SApple OSS Distributions
5138*4f1223e8SApple OSS Distributions int
fill_procworkqueue(proc_t p,struct proc_workqueueinfo * pwqinfo)5139*4f1223e8SApple OSS Distributions fill_procworkqueue(proc_t p, struct proc_workqueueinfo * pwqinfo)
5140*4f1223e8SApple OSS Distributions {
5141*4f1223e8SApple OSS Distributions struct workqueue *wq = proc_get_wqptr(p);
5142*4f1223e8SApple OSS Distributions int error = 0;
5143*4f1223e8SApple OSS Distributions int activecount;
5144*4f1223e8SApple OSS Distributions
5145*4f1223e8SApple OSS Distributions if (wq == NULL) {
5146*4f1223e8SApple OSS Distributions return EINVAL;
5147*4f1223e8SApple OSS Distributions }
5148*4f1223e8SApple OSS Distributions
5149*4f1223e8SApple OSS Distributions /*
5150*4f1223e8SApple OSS Distributions * This is sometimes called from interrupt context by the kperf sampler.
5151*4f1223e8SApple OSS Distributions * In that case, it's not safe to spin trying to take the lock since we
5152*4f1223e8SApple OSS Distributions * might already hold it. So, we just try-lock it and error out if it's
5153*4f1223e8SApple OSS Distributions * already held. Since this is just a debugging aid, and all our callers
5154*4f1223e8SApple OSS Distributions * are able to handle an error, that's fine.
5155*4f1223e8SApple OSS Distributions */
5156*4f1223e8SApple OSS Distributions bool locked = workq_lock_try(wq);
5157*4f1223e8SApple OSS Distributions if (!locked) {
5158*4f1223e8SApple OSS Distributions return EBUSY;
5159*4f1223e8SApple OSS Distributions }
5160*4f1223e8SApple OSS Distributions
5161*4f1223e8SApple OSS Distributions wq_thactive_t act = _wq_thactive(wq);
5162*4f1223e8SApple OSS Distributions activecount = _wq_thactive_aggregate_downto_qos(wq, act,
5163*4f1223e8SApple OSS Distributions WORKQ_THREAD_QOS_MIN, NULL, NULL);
5164*4f1223e8SApple OSS Distributions if (act & _wq_thactive_offset_for_qos(WORKQ_THREAD_QOS_MANAGER)) {
5165*4f1223e8SApple OSS Distributions activecount++;
5166*4f1223e8SApple OSS Distributions }
5167*4f1223e8SApple OSS Distributions pwqinfo->pwq_nthreads = wq->wq_nthreads;
5168*4f1223e8SApple OSS Distributions pwqinfo->pwq_runthreads = activecount;
5169*4f1223e8SApple OSS Distributions pwqinfo->pwq_blockedthreads = wq->wq_threads_scheduled - activecount;
5170*4f1223e8SApple OSS Distributions pwqinfo->pwq_state = 0;
5171*4f1223e8SApple OSS Distributions
5172*4f1223e8SApple OSS Distributions if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
5173*4f1223e8SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT;
5174*4f1223e8SApple OSS Distributions }
5175*4f1223e8SApple OSS Distributions
5176*4f1223e8SApple OSS Distributions if (wq->wq_nthreads >= wq_max_threads) {
5177*4f1223e8SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_TOTAL_THREAD_LIMIT;
5178*4f1223e8SApple OSS Distributions }
5179*4f1223e8SApple OSS Distributions
5180*4f1223e8SApple OSS Distributions uint64_t total_cooperative_threads;
5181*4f1223e8SApple OSS Distributions total_cooperative_threads = workq_num_cooperative_threads_scheduled_total(wq);
5182*4f1223e8SApple OSS Distributions if ((total_cooperative_threads == wq_cooperative_queue_max_size(wq)) &&
5183*4f1223e8SApple OSS Distributions workq_has_cooperative_thread_requests(wq)) {
5184*4f1223e8SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT;
5185*4f1223e8SApple OSS Distributions }
5186*4f1223e8SApple OSS Distributions
5187*4f1223e8SApple OSS Distributions if (wq->wq_exceeded_active_constrained_thread_limit) {
5188*4f1223e8SApple OSS Distributions pwqinfo->pwq_state |= WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT;
5189*4f1223e8SApple OSS Distributions }
5190*4f1223e8SApple OSS Distributions
5191*4f1223e8SApple OSS Distributions workq_unlock(wq);
5192*4f1223e8SApple OSS Distributions return error;
5193*4f1223e8SApple OSS Distributions }
5194*4f1223e8SApple OSS Distributions
5195*4f1223e8SApple OSS Distributions boolean_t
workqueue_get_pwq_exceeded(void * v,boolean_t * exceeded_total,boolean_t * exceeded_constrained)5196*4f1223e8SApple OSS Distributions workqueue_get_pwq_exceeded(void *v, boolean_t *exceeded_total,
5197*4f1223e8SApple OSS Distributions boolean_t *exceeded_constrained)
5198*4f1223e8SApple OSS Distributions {
5199*4f1223e8SApple OSS Distributions proc_t p = v;
5200*4f1223e8SApple OSS Distributions struct proc_workqueueinfo pwqinfo;
5201*4f1223e8SApple OSS Distributions int err;
5202*4f1223e8SApple OSS Distributions
5203*4f1223e8SApple OSS Distributions assert(p != NULL);
5204*4f1223e8SApple OSS Distributions assert(exceeded_total != NULL);
5205*4f1223e8SApple OSS Distributions assert(exceeded_constrained != NULL);
5206*4f1223e8SApple OSS Distributions
5207*4f1223e8SApple OSS Distributions err = fill_procworkqueue(p, &pwqinfo);
5208*4f1223e8SApple OSS Distributions if (err) {
5209*4f1223e8SApple OSS Distributions return FALSE;
5210*4f1223e8SApple OSS Distributions }
5211*4f1223e8SApple OSS Distributions if (!(pwqinfo.pwq_state & WQ_FLAGS_AVAILABLE)) {
5212*4f1223e8SApple OSS Distributions return FALSE;
5213*4f1223e8SApple OSS Distributions }
5214*4f1223e8SApple OSS Distributions
5215*4f1223e8SApple OSS Distributions *exceeded_total = (pwqinfo.pwq_state & WQ_EXCEEDED_TOTAL_THREAD_LIMIT);
5216*4f1223e8SApple OSS Distributions *exceeded_constrained = (pwqinfo.pwq_state & WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT);
5217*4f1223e8SApple OSS Distributions
5218*4f1223e8SApple OSS Distributions return TRUE;
5219*4f1223e8SApple OSS Distributions }
5220*4f1223e8SApple OSS Distributions
uint64_t
workqueue_get_task_ss_flags_from_pwq_state_kdp(void * v)
{
	/*
	 * Summarize a process's workqueue state as stackshot task flags.
	 * Runs in kernel-debugger (kdp) context, so it must not block or
	 * acquire locks; it bails out instead of waiting.
	 *
	 * The kTaskWq* stackshot flags are defined as fixed left-shifts of
	 * the corresponding WQ_* pwq_state bits.  These asserts pin that
	 * mapping so the two constant sets cannot silently drift apart.
	 */
	static_assert((WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT << 17) ==
	    kTaskWqExceededConstrainedThreadLimit);
	static_assert((WQ_EXCEEDED_TOTAL_THREAD_LIMIT << 17) ==
	    kTaskWqExceededTotalThreadLimit);
	static_assert((WQ_FLAGS_AVAILABLE << 17) == kTaskWqFlagsAvailable);
	static_assert(((uint64_t)WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT << 34) ==
	    (uint64_t)kTaskWqExceededCooperativeThreadLimit);
	static_assert(((uint64_t)WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT << 34) ==
	    (uint64_t)kTaskWqExceededActiveConstrainedThreadLimit);
	/* Sanity check: the five WQ_* bits above are exactly the low 5 bits. */
	static_assert((WQ_FLAGS_AVAILABLE | WQ_EXCEEDED_TOTAL_THREAD_LIMIT |
	    WQ_EXCEEDED_CONSTRAINED_THREAD_LIMIT |
	    WQ_EXCEEDED_COOPERATIVE_THREAD_LIMIT |
	    WQ_EXCEEDED_ACTIVE_CONSTRAINED_THREAD_LIMIT) == 0x1F);

	if (v == NULL) {
		return 0;
	}

	proc_t p = v;
	struct workqueue *wq = proc_get_wqptr(p);

	/*
	 * If there is no workqueue, or the workqueue lock is currently held
	 * by someone (we cannot wait for it in kdp context), report nothing.
	 */
	if (wq == NULL || workq_lock_is_acquired_kdp(wq)) {
		return 0;
	}

	uint64_t ss_flags = kTaskWqFlagsAvailable;

	if (wq->wq_constrained_threads_scheduled >= wq_max_constrained_threads) {
		ss_flags |= kTaskWqExceededConstrainedThreadLimit;
	}

	if (wq->wq_nthreads >= wq_max_threads) {
		ss_flags |= kTaskWqExceededTotalThreadLimit;
	}

	/*
	 * Total cooperative threads across all QoS buckets.  NOTE(review):
	 * this uses the _internal accessor rather than
	 * workq_num_cooperative_threads_scheduled_total() used on the
	 * fill_procworkqueue path — presumably because the non-internal
	 * variant is unsafe without the workq lock; confirm against its
	 * definition.
	 */
	uint64_t total_cooperative_threads;
	total_cooperative_threads = workq_num_cooperative_threads_scheduled_to_qos_internal(wq,
	    WORKQ_THREAD_QOS_MIN);
	/*
	 * Only flag the cooperative pool as exceeded when it is saturated
	 * AND there are still cooperative thread requests waiting.
	 */
	if ((total_cooperative_threads == wq_cooperative_queue_max_size(wq)) &&
	    workq_has_cooperative_thread_requests(wq)) {
		ss_flags |= kTaskWqExceededCooperativeThreadLimit;
	}

	if (wq->wq_exceeded_active_constrained_thread_limit) {
		ss_flags |= kTaskWqExceededActiveConstrainedThreadLimit;
	}

	return ss_flags;
}
5273*4f1223e8SApple OSS Distributions
5274*4f1223e8SApple OSS Distributions void
workq_init(void)5275*4f1223e8SApple OSS Distributions workq_init(void)
5276*4f1223e8SApple OSS Distributions {
5277*4f1223e8SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_stalled_window.usecs,
5278*4f1223e8SApple OSS Distributions NSEC_PER_USEC, &wq_stalled_window.abstime);
5279*4f1223e8SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_reduce_pool_window.usecs,
5280*4f1223e8SApple OSS Distributions NSEC_PER_USEC, &wq_reduce_pool_window.abstime);
5281*4f1223e8SApple OSS Distributions clock_interval_to_absolutetime_interval(wq_max_timer_interval.usecs,
5282*4f1223e8SApple OSS Distributions NSEC_PER_USEC, &wq_max_timer_interval.abstime);
5283*4f1223e8SApple OSS Distributions
5284*4f1223e8SApple OSS Distributions thread_deallocate_daemon_register_queue(&workq_deallocate_queue,
5285*4f1223e8SApple OSS Distributions workq_deallocate_queue_invoke);
5286*4f1223e8SApple OSS Distributions }
5287